applied-ai-018 committed
Commit 4d5056d · verified · 1 Parent(s): 1cee3d8

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See raw diff
Files changed (50)
  1. llmeval-env/lib/python3.10/site-packages/transformers/models/bark/__init__.py +79 -0
  2. llmeval-env/lib/python3.10/site-packages/transformers/models/bark/__pycache__/__init__.cpython-310.pyc +0 -0
  3. llmeval-env/lib/python3.10/site-packages/transformers/models/bark/__pycache__/configuration_bark.cpython-310.pyc +0 -0
  4. llmeval-env/lib/python3.10/site-packages/transformers/models/bark/__pycache__/convert_suno_to_hf.cpython-310.pyc +0 -0
  5. llmeval-env/lib/python3.10/site-packages/transformers/models/bark/__pycache__/generation_configuration_bark.cpython-310.pyc +0 -0
  6. llmeval-env/lib/python3.10/site-packages/transformers/models/bark/__pycache__/modeling_bark.cpython-310.pyc +0 -0
  7. llmeval-env/lib/python3.10/site-packages/transformers/models/bark/__pycache__/processing_bark.cpython-310.pyc +0 -0
  8. llmeval-env/lib/python3.10/site-packages/transformers/models/bark/modeling_bark.py +1908 -0
  9. llmeval-env/lib/python3.10/site-packages/transformers/models/code_llama/__init__.py +57 -0
  10. llmeval-env/lib/python3.10/site-packages/transformers/models/code_llama/__pycache__/__init__.cpython-310.pyc +0 -0
  11. llmeval-env/lib/python3.10/site-packages/transformers/models/code_llama/__pycache__/tokenization_code_llama.cpython-310.pyc +0 -0
  12. llmeval-env/lib/python3.10/site-packages/transformers/models/code_llama/__pycache__/tokenization_code_llama_fast.cpython-310.pyc +0 -0
  13. llmeval-env/lib/python3.10/site-packages/transformers/models/code_llama/tokenization_code_llama.py +509 -0
  14. llmeval-env/lib/python3.10/site-packages/transformers/models/code_llama/tokenization_code_llama_fast.py +439 -0
  15. llmeval-env/lib/python3.10/site-packages/transformers/models/codegen/__init__.py +73 -0
  16. llmeval-env/lib/python3.10/site-packages/transformers/models/codegen/__pycache__/__init__.cpython-310.pyc +0 -0
  17. llmeval-env/lib/python3.10/site-packages/transformers/models/codegen/__pycache__/configuration_codegen.cpython-310.pyc +0 -0
  18. llmeval-env/lib/python3.10/site-packages/transformers/models/codegen/__pycache__/modeling_codegen.cpython-310.pyc +0 -0
  19. llmeval-env/lib/python3.10/site-packages/transformers/models/codegen/__pycache__/tokenization_codegen.cpython-310.pyc +0 -0
  20. llmeval-env/lib/python3.10/site-packages/transformers/models/codegen/__pycache__/tokenization_codegen_fast.cpython-310.pyc +0 -0
  21. llmeval-env/lib/python3.10/site-packages/transformers/models/codegen/configuration_codegen.py +229 -0
  22. llmeval-env/lib/python3.10/site-packages/transformers/models/codegen/modeling_codegen.py +719 -0
  23. llmeval-env/lib/python3.10/site-packages/transformers/models/codegen/tokenization_codegen.py +417 -0
  24. llmeval-env/lib/python3.10/site-packages/transformers/models/codegen/tokenization_codegen_fast.py +273 -0
  25. llmeval-env/lib/python3.10/site-packages/transformers/models/distilbert/configuration_distilbert.py +140 -0
  26. llmeval-env/lib/python3.10/site-packages/transformers/models/distilbert/modeling_distilbert.py +1384 -0
  27. llmeval-env/lib/python3.10/site-packages/transformers/models/distilbert/modeling_flax_distilbert.py +895 -0
  28. llmeval-env/lib/python3.10/site-packages/transformers/models/distilbert/modeling_tf_distilbert.py +1139 -0
  29. llmeval-env/lib/python3.10/site-packages/transformers/models/distilbert/tokenization_distilbert.py +514 -0
  30. llmeval-env/lib/python3.10/site-packages/transformers/models/distilbert/tokenization_distilbert_fast.py +176 -0
  31. llmeval-env/lib/python3.10/site-packages/transformers/models/dpr/__pycache__/convert_dpr_original_checkpoint_to_pytorch.cpython-310.pyc +0 -0
  32. llmeval-env/lib/python3.10/site-packages/transformers/models/dpr/__pycache__/modeling_dpr.cpython-310.pyc +0 -0
  33. llmeval-env/lib/python3.10/site-packages/transformers/models/dpr/__pycache__/modeling_tf_dpr.cpython-310.pyc +0 -0
  34. llmeval-env/lib/python3.10/site-packages/transformers/models/dpr/__pycache__/tokenization_dpr.cpython-310.pyc +0 -0
  35. llmeval-env/lib/python3.10/site-packages/transformers/models/dpr/__pycache__/tokenization_dpr_fast.cpython-310.pyc +0 -0
  36. llmeval-env/lib/python3.10/site-packages/transformers/models/focalnet/__pycache__/__init__.cpython-310.pyc +0 -0
  37. llmeval-env/lib/python3.10/site-packages/transformers/models/focalnet/__pycache__/configuration_focalnet.cpython-310.pyc +0 -0
  38. llmeval-env/lib/python3.10/site-packages/transformers/models/focalnet/__pycache__/convert_focalnet_to_hf_format.cpython-310.pyc +0 -0
  39. llmeval-env/lib/python3.10/site-packages/transformers/models/focalnet/__pycache__/modeling_focalnet.cpython-310.pyc +0 -0
  40. llmeval-env/lib/python3.10/site-packages/transformers/models/funnel/__pycache__/__init__.cpython-310.pyc +0 -0
  41. llmeval-env/lib/python3.10/site-packages/transformers/models/funnel/__pycache__/configuration_funnel.cpython-310.pyc +0 -0
  42. llmeval-env/lib/python3.10/site-packages/transformers/models/funnel/__pycache__/convert_funnel_original_tf_checkpoint_to_pytorch.cpython-310.pyc +0 -0
  43. llmeval-env/lib/python3.10/site-packages/transformers/models/funnel/__pycache__/modeling_funnel.cpython-310.pyc +0 -0
  44. llmeval-env/lib/python3.10/site-packages/transformers/models/funnel/__pycache__/modeling_tf_funnel.cpython-310.pyc +0 -0
  45. llmeval-env/lib/python3.10/site-packages/transformers/models/funnel/__pycache__/tokenization_funnel.cpython-310.pyc +0 -0
  46. llmeval-env/lib/python3.10/site-packages/transformers/models/funnel/__pycache__/tokenization_funnel_fast.cpython-310.pyc +0 -0
  47. llmeval-env/lib/python3.10/site-packages/transformers/models/funnel/tokenization_funnel.py +534 -0
  48. llmeval-env/lib/python3.10/site-packages/transformers/models/gemma/__init__.py +121 -0
  49. llmeval-env/lib/python3.10/site-packages/transformers/models/gemma/__pycache__/__init__.cpython-310.pyc +0 -0
  50. llmeval-env/lib/python3.10/site-packages/transformers/models/gemma/__pycache__/configuration_gemma.cpython-310.pyc +0 -0
llmeval-env/lib/python3.10/site-packages/transformers/models/bark/__init__.py ADDED
@@ -0,0 +1,79 @@
+# Copyright 2023 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+from ...utils import (
+    OptionalDependencyNotAvailable,
+    _LazyModule,
+    is_torch_available,
+)
+
+
+_import_structure = {
+    "configuration_bark": [
+        "BARK_PRETRAINED_CONFIG_ARCHIVE_MAP",
+        "BarkCoarseConfig",
+        "BarkConfig",
+        "BarkFineConfig",
+        "BarkSemanticConfig",
+    ],
+    "processing_bark": ["BarkProcessor"],
+}
+
+try:
+    if not is_torch_available():
+        raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+    pass
+else:
+    _import_structure["modeling_bark"] = [
+        "BARK_PRETRAINED_MODEL_ARCHIVE_LIST",
+        "BarkFineModel",
+        "BarkSemanticModel",
+        "BarkCoarseModel",
+        "BarkModel",
+        "BarkPreTrainedModel",
+        "BarkCausalModel",
+    ]
+
+if TYPE_CHECKING:
+    from .configuration_bark import (
+        BARK_PRETRAINED_CONFIG_ARCHIVE_MAP,
+        BarkCoarseConfig,
+        BarkConfig,
+        BarkFineConfig,
+        BarkSemanticConfig,
+    )
+    from .processing_bark import BarkProcessor
+
+    try:
+        if not is_torch_available():
+            raise OptionalDependencyNotAvailable()
+    except OptionalDependencyNotAvailable:
+        pass
+    else:
+        from .modeling_bark import (
+            BARK_PRETRAINED_MODEL_ARCHIVE_LIST,
+            BarkCausalModel,
+            BarkCoarseModel,
+            BarkFineModel,
+            BarkModel,
+            BarkPreTrainedModel,
+            BarkSemanticModel,
+        )
+
+else:
+    import sys
+
+    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
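
For reference, the `__init__.py` above registers Bark's submodules with transformers' `_LazyModule` machinery, so the torch-dependent modeling classes are only imported when first accessed. A minimal usage sketch of the public API exposed here (assuming PyTorch is installed and the `suno/bark-small` checkpoint referenced later in this diff is available; the voice preset name is an illustrative assumption):

from transformers import AutoProcessor, BarkModel

# Loads the BarkProcessor declared in _import_structure["processing_bark"]
processor = AutoProcessor.from_pretrained("suno/bark-small")

# Resolving BarkModel triggers the lazy import of modeling_bark (requires torch)
model = BarkModel.from_pretrained("suno/bark-small")

inputs = processor("Hello, this is a test.", voice_preset="v2/en_speaker_6")
audio_array = model.generate(**inputs)  # waveform tokens decoded by the fine/coarse sub-models
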
llmeval-env/lib/python3.10/site-packages/transformers/models/bark/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.15 kB).

llmeval-env/lib/python3.10/site-packages/transformers/models/bark/__pycache__/configuration_bark.cpython-310.pyc ADDED
Binary file (11.3 kB).

llmeval-env/lib/python3.10/site-packages/transformers/models/bark/__pycache__/convert_suno_to_hf.cpython-310.pyc ADDED
Binary file (6.75 kB).

llmeval-env/lib/python3.10/site-packages/transformers/models/bark/__pycache__/generation_configuration_bark.cpython-310.pyc ADDED
Binary file (13.1 kB).

llmeval-env/lib/python3.10/site-packages/transformers/models/bark/__pycache__/modeling_bark.cpython-310.pyc ADDED
Binary file (55.5 kB).

llmeval-env/lib/python3.10/site-packages/transformers/models/bark/__pycache__/processing_bark.cpython-310.pyc ADDED
Binary file (9.95 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/bark/modeling_bark.py ADDED
@@ -0,0 +1,1908 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 The Suno AI Authors and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch BARK model."""
16
+ import math
17
+ from typing import Dict, Optional, Tuple, Union
18
+
19
+ import numpy as np
20
+ import torch
21
+ from torch import nn
22
+ from torch.nn import functional as F
23
+
24
+ from ...generation.logits_process import (
25
+ AlternatingCodebooksLogitsProcessor,
26
+ BarkEosPrioritizerLogitsProcessor,
27
+ SuppressTokensLogitsProcessor,
28
+ )
29
+ from ...modeling_attn_mask_utils import _prepare_4d_attention_mask
30
+ from ...modeling_outputs import CausalLMOutputWithPast, MaskedLMOutput
31
+ from ...modeling_utils import PreTrainedModel, get_parameter_device
32
+ from ...utils import (
33
+ add_start_docstrings,
34
+ add_start_docstrings_to_model_forward,
35
+ is_accelerate_available,
36
+ is_flash_attn_2_available,
37
+ is_flash_attn_greater_or_equal_2_10,
38
+ logging,
39
+ )
40
+ from ..auto import AutoModel
41
+ from .configuration_bark import (
42
+ BarkCoarseConfig,
43
+ BarkConfig,
44
+ BarkFineConfig,
45
+ BarkSemanticConfig,
46
+ BarkSubModelConfig,
47
+ )
48
+ from .generation_configuration_bark import (
49
+ BarkCoarseGenerationConfig,
50
+ BarkFineGenerationConfig,
51
+ BarkSemanticGenerationConfig,
52
+ )
53
+
54
+
55
+ if is_flash_attn_2_available():
56
+ from flash_attn import flash_attn_func, flash_attn_varlen_func
57
+ from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input # noqa
58
+
59
+
60
+ logger = logging.get_logger(__name__)
61
+
62
+
63
+ _CHECKPOINT_FOR_DOC = "suno/bark-small"
64
+ _CONFIG_FOR_DOC = "BarkConfig"
65
+
66
+
67
+ from ..deprecated._archive_maps import BARK_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
68
+
69
+
70
+ # Copied from transformers.models.llama.modeling_llama._get_unpad_data
71
+ def _get_unpad_data(attention_mask):
72
+ seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
73
+ indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
74
+ max_seqlen_in_batch = seqlens_in_batch.max().item()
75
+ cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
76
+ return (
77
+ indices,
78
+ cu_seqlens,
79
+ max_seqlen_in_batch,
80
+ )
81
+
82
+
83
+ class BarkSelfAttention(nn.Module):
84
+ # adapted from GPTNeoSelfAttention and Bark code
85
+ # BarkSelfAttention can have two attention types, i.e. full attention or causal attention
86
+
87
+ def __init__(self, config, is_causal=False):
88
+ super().__init__()
89
+
90
+ # regularization
91
+ self.dropout = config.dropout
92
+ self.attn_dropout = nn.Dropout(config.dropout)
93
+ self.resid_dropout = nn.Dropout(config.dropout)
94
+
95
+ self.embed_dim = config.hidden_size
96
+ self.num_heads = config.num_heads
97
+ self.head_dim = self.embed_dim // self.num_heads
98
+
99
+ if config.hidden_size % config.num_heads != 0:
100
+ raise ValueError(
101
+ f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
102
+ f" {self.num_heads})."
103
+ )
104
+
105
+ # key, query, value projections for all heads, but in a batch
106
+ self.att_proj = nn.Linear(config.hidden_size, 3 * config.hidden_size, bias=config.bias)
107
+ # output projection
108
+ self.out_proj = nn.Linear(config.hidden_size, config.hidden_size, bias=config.bias)
109
+
110
+ self.is_causal = is_causal
111
+ if is_causal:
112
+ block_size = config.block_size
113
+ bias = torch.tril(torch.ones((block_size, block_size), dtype=bool)).view(1, 1, block_size, block_size)
114
+ self.register_buffer("bias", bias)
115
+
116
+ # Copied from transformers.models.gpt_neo.modeling_gpt_neo.GPTNeoSelfAttention._split_heads
117
+ def _split_heads(self, tensor, num_heads, attn_head_size):
118
+ """
119
+ Splits hidden_size dim into attn_head_size and num_heads
120
+ """
121
+ new_shape = tensor.size()[:-1] + (num_heads, attn_head_size)
122
+ tensor = tensor.view(new_shape)
123
+ return tensor.permute(0, 2, 1, 3) # (batch, head, seq_length, head_features)
124
+
125
+ def _merge_heads(self, tensor, num_heads, attn_head_size):
126
+ """
127
+ Merges attn_head_size dim and num_attn_heads dim into hidden_size
128
+ """
129
+
130
+ # re-assemble all head outputs side by side
131
+ # (batch, num_heads, seq_len, attn_head_size) -> (batch, seq_len, num_heads*attn_head_size)
132
+ tensor = tensor.transpose(1, 2).contiguous()
133
+ tensor = tensor.view(tensor.size()[:-2] + (num_heads * attn_head_size,))
134
+
135
+ return tensor
136
+
137
+ def _attn(self, query, key, value, attention_mask=None, head_mask=None):
138
+ # unlike GPTNeo's SelfAttention, divide by the square root of the dimension of the query and the key
139
+ attn_weights = torch.matmul(query, key.transpose(-1, -2)) * (1.0 / math.sqrt(self.head_dim))
140
+
141
+ if self.is_causal:
142
+ query_length, key_length = query.size(-2), key.size(-2)
143
+
144
+ # fill the upper triangular (future) part of the attention weights with the dtype minimum (effectively -inf)
145
+ attn_weights = attn_weights.masked_fill(
146
+ self.bias[:, :, key_length - query_length : key_length, :key_length] == 0,
147
+ torch.finfo(attn_weights.dtype).min,
148
+ )
149
+
150
+ if attention_mask is not None:
151
+ # Apply the attention mask
152
+ attn_weights = attn_weights + attention_mask
153
+
154
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1)
155
+ attn_weights = attn_weights.to(value.dtype)
156
+ attn_weights = self.attn_dropout(attn_weights)
157
+
158
+ # Mask heads if we want to
159
+ if head_mask is not None:
160
+ attn_weights = attn_weights * head_mask
161
+
162
+ # (batch, num_heads, seq_len, seq_len) x (batch, num_heads, seq_len, attn_head_size)
163
+ # -> (batch, num_heads, seq_len, attn_head_size)
164
+ attn_output = torch.matmul(attn_weights, value)
165
+
166
+ return attn_output, attn_weights
167
+
168
+ def forward(
169
+ self,
170
+ hidden_states,
171
+ attention_mask=None,
172
+ past_key_values=None,
173
+ head_mask=None,
174
+ use_cache=False,
175
+ output_attentions=False,
176
+ ):
177
+ # calculate query, key, values for all heads in batch and move head forward to be the batch dim
178
+ query, key, value = self.att_proj(hidden_states).split(self.embed_dim, dim=2)
179
+
180
+ query = self._split_heads(query, self.num_heads, self.head_dim)
181
+ key = self._split_heads(key, self.num_heads, self.head_dim)
182
+ value = self._split_heads(value, self.num_heads, self.head_dim)
183
+
184
+ if past_key_values is not None:
185
+ past_key = past_key_values[0]
186
+ past_value = past_key_values[1]
187
+ key = torch.cat((past_key, key), dim=-2)
188
+ value = torch.cat((past_value, value), dim=-2)
189
+
190
+ if use_cache is True:
191
+ present = (key, value)
192
+ else:
193
+ present = None
194
+
195
+ attn_output, attn_weights = self._attn(query, key, value, attention_mask, head_mask)
196
+
197
+ attn_output = self._merge_heads(attn_output, self.num_heads, self.head_dim)
198
+ attn_output = self.out_proj(attn_output)
199
+ attn_output = self.resid_dropout(attn_output)
200
+
201
+ outputs = (attn_output, present)
202
+ if output_attentions:
203
+ outputs += (attn_weights,)
204
+
205
+ return outputs
206
+
207
+
208
+ class BarkSelfFlashAttention2(BarkSelfAttention):
209
+ """
210
+ Bark flash attention module. This module inherits from `BarkSelfAttention` as the weights of the module stay
211
+ untouched. The only required change would be on the forward pass where it needs to correctly call the public API of
212
+ flash attention and deal with padding tokens in case the input contains any of them.
213
+ """
214
+
215
+ # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2.__init__
216
+ def __init__(self, *args, **kwargs):
217
+ super().__init__(*args, **kwargs)
218
+
219
+ # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1.
220
+ # flash_attn<2.1 generates top-left aligned causal mask, while what is needed here is bottom-right alignement, that was made default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0.
221
+ # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left).
222
+ self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()
223
+
224
+ def _split_heads(self, tensor, num_heads, attn_head_size):
225
+ """
226
+ Splits hidden_size dim into attn_head_size and num_heads
227
+ """
228
+ new_shape = tensor.size()[:-1] + (num_heads, attn_head_size)
229
+ tensor = tensor.view(new_shape)
230
+ # Flash attention requires the input to have the shape
231
+ # batch_size x seq_length x head_dim x hidden_dim - (batch, seq_length, head, head_features)
232
+ return tensor
233
+
234
+ def _merge_heads(self, tensor, num_heads, attn_head_size):
235
+ """
236
+ Merges attn_head_size dim and num_attn_heads dim into hidden_size
237
+ """
238
+ # re-assemble all head outputs side by side
239
+ # (batch, seq_len, num_heads, attn_head_size) -> (batch, seq_len, num_heads*attn_head_size)
240
+ tensor = tensor.view(tensor.size()[:-2] + (num_heads * attn_head_size,))
241
+ return tensor
242
+
243
+ def forward(
244
+ self,
245
+ hidden_states,
246
+ attention_mask=None,
247
+ past_key_values=None,
248
+ head_mask=None,
249
+ use_cache=False,
250
+ output_attentions=False,
251
+ ):
252
+ batch_size, query_len, _ = hidden_states.size()
253
+
254
+ # calculate query, key, values for all heads in batch and move head forward to be the batch dim
255
+ query, key, value = self.att_proj(hidden_states).split(self.embed_dim, dim=2)
256
+
257
+ query = self._split_heads(query, self.num_heads, self.head_dim)
258
+ key = self._split_heads(key, self.num_heads, self.head_dim)
259
+ value = self._split_heads(value, self.num_heads, self.head_dim)
260
+
261
+ if past_key_values is not None:
262
+ # (batch, head, seq_length, head_features) -> (batch, seq_length, head, head_features)
263
+ past_key = past_key_values[0].transpose(1, 2)
264
+ past_value = past_key_values[1].transpose(1, 2)
265
+ # and merge on seq_length
266
+ key = torch.cat((past_key, key), dim=1)
267
+ value = torch.cat((past_value, value), dim=1)
268
+
269
+ if use_cache is True:
270
+ # (batch, head, seq_length, head_features)
271
+ present = (key.transpose(1, 2), value.transpose(1, 2))
272
+ else:
273
+ present = None
274
+
275
+ attn_output = self._flash_attention_forward(query, key, value, attention_mask, query_len, dropout=self.dropout)
276
+
277
+ attn_output = self._merge_heads(attn_output, self.num_heads, self.head_dim)
278
+ attn_output = self.out_proj(attn_output)
279
+ attn_output = self.resid_dropout(attn_output)
280
+
281
+ outputs = (attn_output, present)
282
+ if output_attentions:
283
+ attn_weights = None
284
+ outputs += (attn_weights,)
285
+
286
+ return outputs
287
+
288
+ # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2._flash_attention_forward
289
+ def _flash_attention_forward(
290
+ self, query_states, key_states, value_states, attention_mask, query_length, dropout=0.0, softmax_scale=None
291
+ ):
292
+ """
293
+ Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token
294
+ first unpad the input, then computes the attention scores and pad the final attention scores.
295
+
296
+ Args:
297
+ query_states (`torch.Tensor`):
298
+ Input query states to be passed to Flash Attention API
299
+ key_states (`torch.Tensor`):
300
+ Input key states to be passed to Flash Attention API
301
+ value_states (`torch.Tensor`):
302
+ Input value states to be passed to Flash Attention API
303
+ attention_mask (`torch.Tensor`):
304
+ The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the
305
+ position of padding tokens and 1 for the position of non-padding tokens.
306
+ dropout (`float`):
307
+ Attention dropout
308
+ softmax_scale (`float`, *optional*):
309
+ The scaling of QK^T before applying softmax. Defaults to 1 / sqrt(head_dim)
310
+ """
311
+ if not self._flash_attn_uses_top_left_mask:
312
+ causal = self.is_causal
313
+ else:
314
+ # TODO: Remove the `query_length != 1` check once Flash Attention for RoCm is bumped to 2.1. For details, please see the comment in LlamaFlashAttention2 __init__.
315
+ causal = self.is_causal and query_length != 1
316
+
317
+ # Contains at least one padding token in the sequence
318
+ if attention_mask is not None:
319
+ batch_size = query_states.shape[0]
320
+ query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input(
321
+ query_states, key_states, value_states, attention_mask, query_length
322
+ )
323
+
324
+ cu_seqlens_q, cu_seqlens_k = cu_seq_lens
325
+ max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens
326
+
327
+ attn_output_unpad = flash_attn_varlen_func(
328
+ query_states,
329
+ key_states,
330
+ value_states,
331
+ cu_seqlens_q=cu_seqlens_q,
332
+ cu_seqlens_k=cu_seqlens_k,
333
+ max_seqlen_q=max_seqlen_in_batch_q,
334
+ max_seqlen_k=max_seqlen_in_batch_k,
335
+ dropout_p=dropout,
336
+ softmax_scale=softmax_scale,
337
+ causal=causal,
338
+ )
339
+
340
+ attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length)
341
+ else:
342
+ attn_output = flash_attn_func(
343
+ query_states, key_states, value_states, dropout, softmax_scale=softmax_scale, causal=causal
344
+ )
345
+
346
+ return attn_output
347
+
348
+ # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2._upad_input
349
+ def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length):
350
+ indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask)
351
+ batch_size, kv_seq_len, num_key_value_heads, head_dim = key_layer.shape
352
+
353
+ key_layer = index_first_axis(
354
+ key_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
355
+ )
356
+ value_layer = index_first_axis(
357
+ value_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
358
+ )
359
+ if query_length == kv_seq_len:
360
+ query_layer = index_first_axis(
361
+ query_layer.reshape(batch_size * kv_seq_len, self.num_heads, head_dim), indices_k
362
+ )
363
+ cu_seqlens_q = cu_seqlens_k
364
+ max_seqlen_in_batch_q = max_seqlen_in_batch_k
365
+ indices_q = indices_k
366
+ elif query_length == 1:
367
+ max_seqlen_in_batch_q = 1
368
+ cu_seqlens_q = torch.arange(
369
+ batch_size + 1, dtype=torch.int32, device=query_layer.device
370
+ ) # There is a memcpy here, that is very bad.
371
+ indices_q = cu_seqlens_q[:-1]
372
+ query_layer = query_layer.squeeze(1)
373
+ else:
374
+ # The -q_len: slice assumes left padding.
375
+ attention_mask = attention_mask[:, -query_length:]
376
+ query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask)
377
+
378
+ return (
379
+ query_layer,
380
+ key_layer,
381
+ value_layer,
382
+ indices_q,
383
+ (cu_seqlens_q, cu_seqlens_k),
384
+ (max_seqlen_in_batch_q, max_seqlen_in_batch_k),
385
+ )
386
+
387
+
388
+ BARK_ATTENTION_CLASSES = {
389
+ "eager": BarkSelfAttention,
390
+ "flash_attention_2": BarkSelfFlashAttention2,
391
+ }
392
+
393
+
394
+ class BarkLayerNorm(nn.Module):
395
+ """LayerNorm but with an optional bias. PyTorch doesn't support simply bias=False."""
396
+
397
+ def __init__(self, hidden_size, bias=True):
398
+ super().__init__()
399
+ self.weight = nn.Parameter(torch.ones(hidden_size))
400
+ self.bias = nn.Parameter(torch.zeros(hidden_size)) if bias else None
401
+
402
+ def forward(self, input):
403
+ return F.layer_norm(input, self.weight.shape, self.weight, self.bias, eps=1e-5)
404
+
405
+
406
+ class BarkMLP(nn.Module):
407
+ def __init__(self, config):
408
+ super().__init__()
409
+ self.in_proj = nn.Linear(config.hidden_size, 4 * config.hidden_size, bias=config.bias)
410
+ self.out_proj = nn.Linear(4 * config.hidden_size, config.hidden_size, bias=config.bias)
411
+ self.dropout = nn.Dropout(config.dropout)
412
+ self.gelu = nn.GELU()
413
+
414
+ def forward(self, hidden_states):
415
+ hidden_states = self.in_proj(hidden_states)
416
+ hidden_states = self.gelu(hidden_states)
417
+ hidden_states = self.out_proj(hidden_states)
418
+ hidden_states = self.dropout(hidden_states)
419
+ return hidden_states
420
+
421
+
422
+ class BarkBlock(nn.Module):
423
+ def __init__(self, config, is_causal=False):
424
+ super().__init__()
425
+
426
+ if is_causal:
427
+ # if causal, uses handmade LayerNorm, so that the layerNorm bias is optional
428
+ # this handmade layerNorm is used to stick with Bark choice of leaving optional bias in
429
+ # AutoRegressive models (corresponding to the "Text" and the "Coarse" modules)
430
+ self.layernorm_1 = BarkLayerNorm(config.hidden_size, bias=config.bias)
431
+ self.layernorm_2 = BarkLayerNorm(config.hidden_size, bias=config.bias)
432
+ else:
433
+ self.layernorm_1 = nn.LayerNorm(config.hidden_size)
434
+ self.layernorm_2 = nn.LayerNorm(config.hidden_size)
435
+
436
+ self.attn = BARK_ATTENTION_CLASSES[config._attn_implementation](config, is_causal=is_causal)
437
+
438
+ self.mlp = BarkMLP(config)
439
+
440
+ def forward(
441
+ self,
442
+ hidden_states,
443
+ past_key_values=None,
444
+ attention_mask=None,
445
+ head_mask=None,
446
+ use_cache=False,
447
+ output_attentions=False,
448
+ ):
449
+ intermediary_hidden_states = self.layernorm_1(hidden_states)
450
+
451
+ attn_outputs = self.attn(
452
+ intermediary_hidden_states,
453
+ past_key_values=past_key_values,
454
+ attention_mask=attention_mask,
455
+ head_mask=head_mask,
456
+ use_cache=use_cache,
457
+ output_attentions=output_attentions,
458
+ )
459
+
460
+ attn_output = attn_outputs[0] # output_attn: output, present_key_values, (attn_weights)
461
+ outputs = attn_outputs[1:]
462
+
463
+ intermediary_hidden_states = hidden_states + attn_output
464
+ intermediary_hidden_states = intermediary_hidden_states + self.mlp(
465
+ self.layernorm_2(intermediary_hidden_states)
466
+ )
467
+
468
+ if use_cache:
469
+ outputs = (intermediary_hidden_states,) + outputs
470
+ else:
471
+ outputs = (intermediary_hidden_states,) + outputs[1:]
472
+
473
+ return outputs # hidden_states, ((present), attentions)
474
+
475
+
476
+ class BarkPreTrainedModel(PreTrainedModel):
477
+ """
478
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
479
+ models.
480
+ """
481
+
482
+ config_class = BarkConfig
483
+ supports_gradient_checkpointing = False
484
+ _supports_flash_attn_2 = True
485
+
486
+ def _init_weights(self, module):
487
+ """Initialize the weights."""
488
+ if isinstance(module, (nn.Linear,)):
489
+ # Slightly different from the TF version which uses truncated_normal for initialization
490
+ # cf https://github.com/pytorch/pytorch/pull/5617
491
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
492
+ if module.bias is not None:
493
+ module.bias.data.zero_()
494
+ elif isinstance(module, nn.Embedding):
495
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
496
+ if module.padding_idx is not None:
497
+ module.weight.data[module.padding_idx].zero_()
498
+ elif isinstance(module, nn.LayerNorm):
499
+ module.bias.data.zero_()
500
+ module.weight.data.fill_(1.0)
501
+
502
+ def __init__(self, *inputs, **kwargs):
503
+ super().__init__(*inputs, **kwargs)
504
+
505
+ @property
506
+ def device(self) -> torch.device:
507
+ """
508
+ `torch.device`: The device on which the module is (assuming that all the module parameters are on the same
509
+ device).
510
+ """
511
+
512
+ # if has _hf_hook, has been offloaded so the device has to be found in the hook
513
+ if not hasattr(self, "_hf_hook"):
514
+ return get_parameter_device(self)
515
+ for module in self.modules():
516
+ if (
517
+ hasattr(module, "_hf_hook")
518
+ and hasattr(module._hf_hook, "execution_device")
519
+ and module._hf_hook.execution_device is not None
520
+ ):
521
+ return torch.device(module._hf_hook.execution_device)
522
+
523
+ return get_parameter_device(self)
524
+
525
+
526
+ BARK_MODEL_START_DOCSTRING = """
527
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
528
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
529
+ etc.)
530
+
531
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
532
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
533
+ and behavior.
534
+
535
+ Parameters:
536
+ config ([`{config}`]):
537
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
538
+ load the weights associated with the model, only the configuration. Check out the
539
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
540
+ """
541
+
542
+
543
+ BARK_START_DOCSTRING = r"""
544
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
545
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
546
+ etc.)
547
+
548
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
549
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
550
+ and behavior.
551
+
552
+ Parameters:
553
+ config ([`BarkConfig`]):
554
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
555
+ load the weights associated with the model, only the configuration. Check out the
556
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
557
+ """
558
+
559
+
560
+ BARK_FINE_INPUTS_DOCSTRING = r"""
561
+ Args:
562
+ codebook_idx (`int`):
563
+ Index of the codebook that will be predicted.
564
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length, number_of_codebooks)`):
565
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
566
+ it. Initially, indices of the first two codebooks are obtained from the `coarse` sub-model. The rest is
567
+ predicted recursively by attending the previously predicted channels. The model predicts on windows of
568
+ length 1024.
569
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
570
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
571
+
572
+ - 1 for tokens that are **not masked**,
573
+ - 0 for tokens that are **masked**.
574
+
575
+ [What are attention masks?](../glossary#attention-mask)
576
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
577
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
578
+ config.max_position_embeddings - 1]`.
579
+
580
+ [What are position IDs?](../glossary#position-ids)
581
+ head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
582
+ Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`:
583
+
584
+ - 1 indicates the head is **not masked**,
585
+ - 0 indicates the head is **masked**.
586
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): NOT IMPLEMENTED YET.
587
+ input_embeds (`torch.FloatTensor` of shape `(batch_size, input_sequence_length, hidden_size)`, *optional*):
588
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. If
589
+ `past_key_values` is used, optionally only the last `input_embeds` have to be input (see
590
+ `past_key_values`). This is useful if you want more control over how to convert `input_ids` indices into
591
+ associated vectors than the model's internal embedding lookup matrix.
592
+ output_attentions (`bool`, *optional*):
593
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
594
+ tensors for more detail.
595
+ output_hidden_states (`bool`, *optional*):
596
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
597
+ more detail.
598
+ return_dict (`bool`, *optional*):
599
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
600
+ """
601
+
602
+ BARK_CAUSAL_MODEL_INPUTS_DOCSTRING = r"""
603
+ Args:
604
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
605
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
606
+ it. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
607
+ [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids)
608
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache` is passed or when `config.use_cache=True`):
609
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
610
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)`.
611
+
612
+ Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
613
+ `past_key_values` input) to speed up sequential decoding.
614
+
615
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
616
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
617
+ `input_ids` of shape `(batch_size, sequence_length)`.
618
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
619
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
620
+
621
+ - 1 for tokens that are **not masked**,
622
+ - 0 for tokens that are **masked**.
623
+
624
+ [What are attention masks?](../glossary#attention-mask)
625
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
626
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
627
+ config.max_position_embeddings - 1]`.
628
+
629
+ [What are position IDs?](../glossary#position-ids)
630
+ head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
631
+ Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`:
632
+
633
+ - 1 indicates the head is **not masked**,
634
+ - 0 indicates the head is **masked**.
635
+ input_embeds (`torch.FloatTensor` of shape `(batch_size, input_sequence_length, hidden_size)`, *optional*):
636
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
637
+ Here, due to `Bark` particularities, if `past_key_values` is used, `input_embeds` will be ignored and you
638
+ have to use `input_ids`. If `past_key_values` is not used and `use_cache` is set to `True`, `input_embeds`
639
+ is used in priority instead of `input_ids`.
640
+ use_cache (`bool`, *optional*):
641
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
642
+ `past_key_values`).
643
+ output_attentions (`bool`, *optional*):
644
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
645
+ tensors for more detail.
646
+ output_hidden_states (`bool`, *optional*):
647
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
648
+ more detail.
649
+ return_dict (`bool`, *optional*):
650
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
651
+ """
652
+
653
+
654
+ # GPT2-like autoregressive model
655
+ class BarkCausalModel(BarkPreTrainedModel):
656
+ config_class = BarkSubModelConfig
657
+
658
+ def __init__(self, config):
659
+ super().__init__(config)
660
+ self.config = config
661
+
662
+ # initialize as an autoregressive GPT-like model
663
+ self.input_embeds_layer = nn.Embedding(config.input_vocab_size, config.hidden_size)
664
+ self.position_embeds_layer = nn.Embedding(config.block_size, config.hidden_size)
665
+
666
+ self.drop = nn.Dropout(config.dropout)
667
+
668
+ self.layers = nn.ModuleList([BarkBlock(config, is_causal=True) for _ in range(config.num_layers)])
669
+ self._use_flash_attention_2 = config._attn_implementation == "flash_attention_2"
670
+
671
+ self.layernorm_final = BarkLayerNorm(config.hidden_size, bias=config.bias)
672
+
673
+ self.lm_head = nn.Linear(config.hidden_size, config.output_vocab_size, bias=False)
674
+ self.gradient_checkpointing = False
675
+
676
+ # Initialize weights and apply final processing
677
+ self.post_init()
678
+
679
+ def get_input_embeddings(self):
680
+ return self.input_embeds_layer
681
+
682
+ def set_input_embeddings(self, new_embeddings):
683
+ self.input_embeds_layer = new_embeddings
684
+
685
+ def prepare_inputs_for_generation(self, input_ids, past_key_values=None, **kwargs):
686
+ input_embeds = kwargs.get("input_embeds", None)
687
+
688
+ attention_mask = kwargs.get("attention_mask", None)
689
+ position_ids = kwargs.get("position_ids", None)
690
+
691
+ if past_key_values is not None:
692
+ # Omit tokens covered by past_key_values
693
+ seq_len = input_ids.shape[1]
694
+ past_length = past_key_values[0][0].shape[2]
695
+
696
+ # Some generation methods already pass only the last input ID
697
+ if input_ids.shape[1] > past_length:
698
+ remove_prefix_length = past_length
699
+ else:
700
+ # Default to old behavior: keep only final ID
701
+ remove_prefix_length = input_ids.shape[1] - 1
702
+
703
+ input_ids = input_ids[:, remove_prefix_length:]
704
+
705
+ # input_embeds have already been used and is not required anymore
706
+ input_embeds = None
707
+ else:
708
+ if input_embeds is not None and kwargs.get("use_cache"):
709
+ seq_len = input_embeds.shape[1]
710
+ else:
711
+ seq_len = input_ids.shape[1]
712
+
713
+ # ensure that attention_mask and position_ids shapes are aligned with the weird Bark hack of reducing
714
+ # sequence length on the first forward pass
715
+ if attention_mask is not None:
716
+ attention_mask = attention_mask[:, :seq_len]
717
+ if position_ids is not None:
718
+ position_ids = position_ids[:, :seq_len]
719
+
720
+ if attention_mask is not None and position_ids is None:
721
+ # create position_ids on the fly for batch generation
722
+ position_ids = attention_mask.long().cumsum(-1) - 1
723
+ position_ids.masked_fill_(attention_mask == 0, 1)
724
+ if past_key_values:
725
+ position_ids = position_ids[:, -input_ids.shape[1] :]
726
+ else:
727
+ position_ids = None
728
+
729
+ if input_embeds is not None and kwargs.get("use_cache"):
730
+ return {
731
+ "input_ids": None,
732
+ "input_embeds": input_embeds,
733
+ "past_key_values": past_key_values,
734
+ "use_cache": kwargs.get("use_cache"),
735
+ "position_ids": position_ids,
736
+ "attention_mask": attention_mask,
737
+ }
738
+ return {
739
+ "input_ids": input_ids,
740
+ "past_key_values": past_key_values,
741
+ "use_cache": kwargs.get("use_cache"),
742
+ "position_ids": position_ids,
743
+ "attention_mask": attention_mask,
744
+ }
745
+
746
+ @add_start_docstrings_to_model_forward(BARK_CAUSAL_MODEL_INPUTS_DOCSTRING)
747
+ def forward(
748
+ self,
749
+ input_ids: Optional[torch.Tensor] = None,
750
+ past_key_values: Optional[Tuple[torch.FloatTensor]] = None,
751
+ attention_mask: Optional[torch.Tensor] = None,
752
+ position_ids: Optional[torch.Tensor] = None,
753
+ head_mask: Optional[torch.Tensor] = None,
754
+ labels: Optional[torch.LongTensor] = None,
755
+ input_embeds: Optional[torch.Tensor] = None,
756
+ use_cache: Optional[bool] = None,
757
+ output_attentions: Optional[bool] = None,
758
+ output_hidden_states: Optional[bool] = None,
759
+ return_dict: Optional[bool] = None,
760
+ ) -> Union[Tuple[torch.Tensor], CausalLMOutputWithPast]:
761
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
762
+ output_hidden_states = (
763
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
764
+ )
765
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
766
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
767
+
768
+ # Verify if input_embeds already exists
769
+ # then compute embeddings.
770
+ if input_ids is not None and input_embeds is not None:
771
+ raise ValueError("You cannot specify both input_ids and input_embeds at the same time")
772
+ elif input_embeds is not None and past_key_values is None:
773
+ # we want to return the input_embeds in priority so that it is in line with a weird hack
774
+ # of Bark which concatenates two bits of the input_embeds on the first forward pass of the semantic model
775
+ pass
776
+ elif input_ids is not None:
777
+ input_embeds = self.input_embeds_layer(input_ids) # token embeddings of shape (b, t, n_embd)
778
+ elif input_embeds is not None:
779
+ pass
780
+ else:
781
+ raise ValueError("You have to specify either input_ids or input_embeds")
782
+
783
+ input_shape = input_embeds.size()[:-1]
784
+ batch_size = input_embeds.shape[0]
785
+ seq_length = input_shape[-1]
786
+
787
+ device = input_ids.device if input_ids is not None else input_embeds.device
788
+
789
+ if past_key_values is None:
790
+ past_length = 0
791
+ past_key_values = tuple([None] * len(self.layers))
792
+ else:
793
+ past_length = past_key_values[0][0].size(-2)
794
+
795
+ if position_ids is None:
796
+ position_ids = torch.arange(past_length, seq_length + past_length, dtype=torch.long, device=device)
797
+ position_ids = position_ids.unsqueeze(0) # shape (1, seq_length)
798
+
799
+ position_embeds = self.position_embeds_layer(position_ids) # position embeddings of shape (1, t, n_embd)
800
+
801
+ # Attention mask.
802
+ if attention_mask is not None:
803
+ if batch_size <= 0:
804
+ raise ValueError("batch_size has to be defined and > 0")
805
+ if self._use_flash_attention_2:
806
+ attention_mask = attention_mask if 0 in attention_mask else None
807
+ else:
808
+ attention_mask = attention_mask.view(batch_size, -1)
809
+ # [bsz, to_seq_length] -> [bsz, 1, 1, to_seq_length]
810
+ # from_seq_length is 1 to easily broadcast
811
+ attention_mask = _prepare_4d_attention_mask(attention_mask, input_embeds.dtype, tgt_len=1)
812
+
813
+ # Prepare head mask if needed
814
+ # 1.0 in head_mask indicate we keep the head
815
+ # attention_probs has shape bsz x num_heads x N x N
816
+ # head_mask has shape num_layers x batch x num_heads x N x N
817
+ head_mask = self.get_head_mask(head_mask, self.config.num_layers)
818
+
819
+ hidden_states = self.drop(input_embeds + position_embeds)
820
+ output_shape = input_shape + (hidden_states.size(-1),)
821
+
822
+ if self.gradient_checkpointing and self.training:
823
+ if use_cache:
824
+ logger.warning_once(
825
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
826
+ )
827
+ use_cache = False
828
+
829
+ present_key_values = () if use_cache else None
830
+ all_self_attentions = () if output_attentions else None
831
+ all_hidden_states = () if output_hidden_states else None
832
+
833
+ for i, (block, past_layer_key_values) in enumerate(zip(self.layers, past_key_values)):
834
+ if output_hidden_states:
835
+ all_hidden_states = all_hidden_states + (hidden_states,)
836
+
837
+ if self.gradient_checkpointing and self.training:
838
+ outputs = self._gradient_checkpointing_func(
839
+ block.__call__,
840
+ hidden_states,
841
+ None,
842
+ attention_mask,
843
+ head_mask[i],
844
+ use_cache,
845
+ output_attentions,
846
+ )
847
+ else:
848
+ outputs = block(
849
+ hidden_states,
850
+ past_key_values=past_layer_key_values,
851
+ attention_mask=attention_mask,
852
+ head_mask=head_mask[i],
853
+ use_cache=use_cache,
854
+ output_attentions=output_attentions,
855
+ )
856
+
857
+ hidden_states = outputs[0]
858
+
859
+ if use_cache:
860
+ present_key_values = present_key_values + (outputs[1],)
861
+
862
+ if output_attentions:
863
+ all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],)
864
+
865
+ hidden_states = self.layernorm_final(hidden_states)
866
+
867
+ hidden_states = hidden_states.view(output_shape)
868
+
869
+ # Add last hidden state
870
+ if output_hidden_states:
871
+ all_hidden_states = all_hidden_states + (hidden_states,)
872
+
873
+ logits = self.lm_head(hidden_states)
874
+
875
+ loss = None
876
+ if labels is not None:
877
+ raise NotImplementedError(
878
+ "Training is not implemented yet for Bark - ensure you do not pass `labels` to the model."
879
+ )
880
+
881
+ if not return_dict:
882
+ return tuple(
883
+ v for v in [None, logits, present_key_values, all_hidden_states, all_self_attentions] if v is not None
884
+ )
885
+
886
+ return CausalLMOutputWithPast(
887
+ loss=loss,
888
+ logits=logits,
889
+ past_key_values=present_key_values,
890
+ hidden_states=all_hidden_states,
891
+ attentions=all_self_attentions,
892
+ )
893
+
894
+ @staticmethod
895
+ def _reorder_cache(
896
+ past_key_values: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor
897
+ ) -> Tuple[Tuple[torch.Tensor]]:
898
+ """
899
+ This function is used to re-order the `past_key_values` cache if [`~PreTrainedModel.beam_search`] or
900
+ [`~PreTrainedModel.beam_sample`] is called. This is required to match `past_key_values` with the correct
901
+ beam_idx at every generation step.
902
+ """
903
+ # Necessary for beam_search
904
+ return tuple(
905
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past)
906
+ for layer_past in past_key_values
907
+ )
908
+
909
+
910
+ @add_start_docstrings(
911
+ """Bark semantic (or text) model. It shares the same architecture as the coarse model.
912
+ It is a GPT-2 like autoregressive model with a language modeling head on top.""",
913
+ BARK_MODEL_START_DOCSTRING.format(config="BarkSemanticConfig"),
914
+ )
915
+ class BarkSemanticModel(BarkCausalModel):
916
+ base_model_prefix = "semantic"
917
+ config_class = BarkSemanticConfig
918
+
919
+ def generate(
920
+ self,
921
+ input_ids: torch.Tensor,
922
+ semantic_generation_config: BarkSemanticGenerationConfig = None,
923
+ history_prompt: Optional[Dict[str, torch.Tensor]] = None,
924
+ attention_mask: Optional[torch.Tensor] = None,
925
+ **kwargs,
926
+ ) -> torch.LongTensor:
927
+ """
928
+ Generates text semantic tokens from an input prompt and an additional optional `Bark` speaker prompt.
929
+
930
+ Args:
931
+ input_ids (`Optional[torch.Tensor]` of shape (batch_size, seq_len), *optional*):
932
+ Input ids, i.e tokenized input sentences. Will be truncated up to
933
+ semantic_generation_config.max_input_semantic_length tokens. Note that the output audios will be as
934
+ long as the longest generation among the batch.
935
+ semantic_generation_config (`BarkSemanticGenerationConfig`):
936
+ Generation config indicating how to generate the semantic tokens.
937
+ history_prompt (`Optional[Dict[str,torch.Tensor]]`, *optional*):
938
+ Optional `Bark` speaker prompt.
939
+ attention_mask (`Optional[torch.Tensor]`, *optional*):
940
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
941
+
942
+ - 1 for tokens that are **not masked**,
943
+ - 0 for tokens that are **masked**.
944
+
945
+ [What are attention masks?](../glossary#attention-mask)
946
+ Returns:
947
+ torch.LongTensor: Output semantic tokens.
948
+ """
949
+ if semantic_generation_config is None:
950
+ raise ValueError("`semantic_generation_config` has to be provided")
951
+
952
+ batch_size = input_ids.shape[0]
953
+
954
+ max_input_semantic_length = semantic_generation_config.max_input_semantic_length
955
+
956
+ input_ids = input_ids + semantic_generation_config.text_encoding_offset
957
+
958
+ if attention_mask is not None:
959
+ input_ids = input_ids.masked_fill((1 - attention_mask).bool(), semantic_generation_config.text_pad_token)
960
+
961
+ if history_prompt is not None:
962
+ semantic_history = history_prompt["semantic_prompt"][-max_input_semantic_length:]
963
+ semantic_history = nn.functional.pad(
964
+ semantic_history,
965
+ (0, max_input_semantic_length - len(semantic_history)),
966
+ value=semantic_generation_config.semantic_pad_token,
967
+ mode="constant",
968
+ )
969
+ else:
970
+ semantic_history = torch.tensor(
971
+ [semantic_generation_config.semantic_pad_token] * max_input_semantic_length, dtype=torch.int
972
+ ).to(self.device)
973
+
974
+ semantic_history = torch.repeat_interleave(semantic_history[None], batch_size, dim=0)
975
+
976
+ infer_array = torch.tensor(
977
+ [[semantic_generation_config.semantic_infer_token]] * batch_size, dtype=torch.int
978
+ ).to(self.device)
979
+
980
+ input_embeds = torch.cat(
981
+ [
982
+ self.input_embeds_layer(input_ids[:, :max_input_semantic_length])
983
+ + self.input_embeds_layer(semantic_history[:, : max_input_semantic_length + 1]),
984
+ self.input_embeds_layer(infer_array),
985
+ ],
986
+ dim=1,
987
+ )
988
+
989
+ tokens_to_suppress = list(
990
+ range(semantic_generation_config.semantic_vocab_size, semantic_generation_config.semantic_pad_token)
991
+ )
992
+ tokens_to_suppress.extend(
993
+ list(range(semantic_generation_config.semantic_pad_token + 1, self.config.output_vocab_size))
994
+ )
995
+
996
+ suppress_tokens_logits_processor = SuppressTokensLogitsProcessor(tokens_to_suppress)
997
+
998
+ min_eos_p = kwargs.get("min_eos_p", semantic_generation_config.min_eos_p)
999
+ early_stopping_logits_processor = BarkEosPrioritizerLogitsProcessor(
1000
+ eos_token_id=semantic_generation_config.eos_token_id, min_eos_p=min_eos_p
1001
+ )
1002
+
1003
+ # pass input_ids in order to stay consistent with the transformers generate method even though it is not used
1004
+ # (except to get the input seq_len - that's why we keep the first 257 tokens)
1005
+ semantic_output = super().generate(
1006
+ torch.ones((batch_size, max_input_semantic_length + 1), dtype=torch.int).to(self.device),
1007
+ input_embeds=input_embeds,
1008
+ logits_processor=[suppress_tokens_logits_processor, early_stopping_logits_processor],
1009
+ generation_config=semantic_generation_config,
1010
+ **kwargs,
1011
+ ) # size: 10048
1012
+
1013
+ # take the generated semantic tokens
1014
+ semantic_output = semantic_output[:, max_input_semantic_length + 1 :]
1015
+
1016
+ return semantic_output
1017
+
1018
+
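# --- Illustrative sketch (annotation, not part of the committed file) ---
# A minimal sketch of how the semantic stage above is normally driven: `BarkModel.generate`
# builds a `BarkSemanticGenerationConfig` from the nested generation config before
# delegating to `BarkSemanticModel.generate` (checkpoint name and prompt are assumptions).
from transformers import AutoProcessor, BarkModel
from transformers.models.bark.generation_configuration_bark import BarkSemanticGenerationConfig

model = BarkModel.from_pretrained("suno/bark-small")
processor = AutoProcessor.from_pretrained("suno/bark-small")
inputs = processor("Hello, my dog is cute", return_tensors="pt")

semantic_generation_config = BarkSemanticGenerationConfig(**model.generation_config.semantic_config)
semantic_tokens = model.semantic.generate(
    inputs["input_ids"],
    attention_mask=inputs.get("attention_mask"),
    semantic_generation_config=semantic_generation_config,
)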
1019
+ @add_start_docstrings(
1020
+ """Bark coarse acoustics model.
1021
+ It shares the same architecture as the semantic (or text) model. It is a GPT-2 like autoregressive model with a
1022
+ language modeling head on top.""",
1023
+ BARK_MODEL_START_DOCSTRING.format(config="BarkCoarseConfig"),
1024
+ )
1025
+ class BarkCoarseModel(BarkCausalModel):
1026
+ base_model_prefix = "coarse_acoustics"
1027
+ config_class = BarkCoarseConfig
1028
+
1029
+ def preprocess_histories(
1030
+ self,
1031
+ max_coarse_history: int,
1032
+ semantic_to_coarse_ratio: int,
1033
+ batch_size: int,
1034
+ semantic_generation_config: BarkSemanticGenerationConfig,
1035
+ codebook_size: int,
1036
+ history_prompt: Optional[Dict[str, torch.Tensor]] = None,
1037
+ ):
1038
+ """
1039
+ Preprocess the optional `Bark` speaker prompts before `self.generate`.
1040
+
1041
+ Args:
1042
+ max_coarse_history (`int`):
1043
+ Maximum size of coarse tokens used.
1044
+ semantic_to_coarse_ratio (`int`):
1045
+ Ratio of semantic to coarse frequency
1046
+ batch_size (`int`):
1047
+ Batch size, i.e. the number of samples.
1048
+ semantic_generation_config (`BarkSemanticGenerationConfig`):
1049
+ Generation config indicating how to generate the semantic tokens.
1050
+ codebook_size (`int`):
1051
+ Codebook channel size, i.e. the size of the output vocabulary per codebook channel.
1052
+ history_prompt (`Optional[Dict[str,torch.Tensor]]`):
1053
+ Optional `Bark` speaker prompt.
1054
+ Returns:
1055
+ `tuple(torch.FloatTensor)`:
1056
+ - **x_semantic_history** (`torch.FloatTensor`) -- Processed semantic speaker prompt.
1057
+ - **x_coarse_history** (`torch.FloatTensor`) -- Processed coarse speaker prompt.
1058
+ """
1059
+ if history_prompt is not None:
1060
+ x_semantic_history = torch.repeat_interleave(history_prompt["semantic_prompt"][None], batch_size, dim=0)
1061
+ # clone to avoid modifying history_prompt.coarse_prompt
1062
+ x_coarse_history = history_prompt["coarse_prompt"].clone()
1063
+
1064
+ # offset x_coarse_history
1065
+ if codebook_size is not None:
1066
+ for n in range(1, x_coarse_history.shape[0]):
1067
+ # offset
1068
+ x_coarse_history[n, :] += codebook_size * n
1069
+
1070
+ # flatten x_coarse_history
1071
+ x_coarse_history = torch.transpose(x_coarse_history, 0, 1).reshape(-1)
1072
+
1073
+ x_coarse_history = x_coarse_history + semantic_generation_config.semantic_vocab_size
1074
+
1075
+ x_coarse_history = torch.repeat_interleave(x_coarse_history[None], batch_size, dim=0)
1076
+ # e.g: after SEMANTIC_VOCAB_SIZE (10000), 1024 tokens dedicated to first codebook, 1024 next tokens
1077
+ # dedicated to second codebook.
1078
+
1079
+ max_semantic_history = int(np.floor(max_coarse_history / semantic_to_coarse_ratio))
1080
+ # trim histories correctly
1081
+ n_semantic_hist_provided = min(
1082
+ [
1083
+ max_semantic_history,
1084
+ x_semantic_history.shape[1] - x_semantic_history.shape[1] % 2,
1085
+ int(np.floor(x_coarse_history.shape[1] / semantic_to_coarse_ratio)),
1086
+ ]
1087
+ )
1088
+
1089
+ n_coarse_hist_provided = int(round(n_semantic_hist_provided * semantic_to_coarse_ratio))
1090
+
1091
+ x_semantic_history = x_semantic_history[:, -n_semantic_hist_provided:].int()
1092
+ x_coarse_history = x_coarse_history[:, -n_coarse_hist_provided:].int()
1093
+ # bit of a hack for time alignment (sounds better) - from Bark original implementation
1094
+ x_coarse_history = x_coarse_history[:, :-2]
1095
+
1096
+ else:
1097
+ # shape: (batch_size, 0)
1098
+ x_semantic_history = torch.tensor([[]] * batch_size, dtype=torch.int).to(self.device)
1099
+ x_coarse_history = torch.tensor([[]] * batch_size, dtype=torch.int).to(self.device)
1100
+
1101
+ return x_semantic_history, x_coarse_history
1102
+
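# --- Illustrative sketch (annotation, not part of the committed file) ---
# How the coarse speaker prompt is offset and flattened above, on a toy example with
# 2 coarse codebooks, codebook_size=1024 and semantic_vocab_size=10_000 (the defaults
# mentioned in the comment above):
import torch

coarse_prompt = torch.tensor([[3, 7], [5, 9]])          # (n_coarse_codebooks, seq_len)
coarse_prompt[1, :] += 1024                              # give the second codebook its own id range
flat = torch.transpose(coarse_prompt, 0, 1).reshape(-1)  # interleave the codebooks per time step
flat = flat + 10_000                                     # shift past the semantic vocabulary
print(flat)                                              # tensor([10003, 11029, 10007, 11033])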
1103
+ def generate(
1104
+ self,
1105
+ semantic_output: torch.Tensor,
1106
+ semantic_generation_config: BarkSemanticGenerationConfig = None,
1107
+ coarse_generation_config: BarkCoarseGenerationConfig = None,
1108
+ codebook_size: int = 1024,
1109
+ history_prompt: Optional[Dict[str, torch.Tensor]] = None,
1110
+ return_output_lengths: Optional[bool] = None,
1111
+ **kwargs,
1112
+ ) -> Union[torch.LongTensor, Tuple[torch.LongTensor, torch.LongTensor]]:
1113
+ """
1114
+ Generates coarse acoustics tokens from input text semantic tokens and an additional optional `Bark` speaker
1115
+ prompt.
1116
+
1117
+ Args:
1118
+ semantic_output (`torch.Tensor` of shape (batch_size, seq_len), *optional*):
1119
+ Input text semantic ids, i.e. the output of `BarkSemanticModel.generate`.
1120
+ semantic_generation_config (`BarkSemanticGenerationConfig`):
1121
+ Generation config indicating how to generate the semantic tokens.
1122
+ coarse_generation_config (`BarkCoarseGenerationConfig`):
1123
+ Generation config indicating how to generate the coarse tokens.
1124
+ codebook_size (`int`, *optional*, defaults to 1024):
1125
+ Codebook channel size, i.e. the size of the output vocabulary per codebook channel.
1126
+ history_prompt (`Optional[Dict[str,torch.Tensor]]`, *optional*):
1127
+ Optional `Bark` speaker prompt.
1128
+ return_output_lengths (`bool`, *optional*):
1129
+ Whether or not to return the output lengths. Useful when batching.
1130
+ Returns:
1131
+ By default:
1132
+ torch.LongTensor: Output coarse acoustics tokens.
1133
+ If `return_output_lengths=True`:
1134
+ `Tuple(torch.Tensor, torch.Tensor)`: The output coarse acoustics tokens, and the length of each sample
1135
+ of the batch.
1136
+ """
1137
+
1138
+ if semantic_generation_config is None:
1139
+ raise ValueError("`semantic_generation_config` has to be provided")
1140
+
1141
+ if coarse_generation_config is None:
1142
+ raise ValueError("`coarse_generation_config` has to be provided")
1143
+
1144
+ max_coarse_input_length = coarse_generation_config.max_coarse_input_length
1145
+ max_coarse_history = coarse_generation_config.max_coarse_history
1146
+ sliding_window_len = coarse_generation_config.sliding_window_len
1147
+
1148
+ # replace semantic_pad_token (eos_tok and pad_tok here) with coarse_semantic_pad_token, i.e. the pad_token
1149
+ # used in the next model
1150
+ semantic_output.masked_fill_(
1151
+ semantic_output == semantic_generation_config.semantic_pad_token,
1152
+ coarse_generation_config.coarse_semantic_pad_token,
1153
+ )
1154
+
1155
+ semantic_to_coarse_ratio = (
1156
+ coarse_generation_config.coarse_rate_hz
1157
+ / semantic_generation_config.semantic_rate_hz
1158
+ * coarse_generation_config.n_coarse_codebooks
1159
+ )
1160
+ max_semantic_history = int(np.floor(max_coarse_history / semantic_to_coarse_ratio))
1161
+
1162
+ output_lengths = (semantic_output != coarse_generation_config.coarse_semantic_pad_token).sum(1)
1163
+ output_lengths = torch.floor(
1164
+ output_lengths * semantic_to_coarse_ratio / coarse_generation_config.n_coarse_codebooks
1165
+ )
1166
+ output_lengths = torch.round(output_lengths * coarse_generation_config.n_coarse_codebooks).int()
1167
+
1168
+ max_generated_len = torch.max(output_lengths).item()
1169
+
1170
+ batch_size = semantic_output.shape[0]
1171
+
1172
+ x_semantic_history, x_coarse = self.preprocess_histories(
1173
+ history_prompt=history_prompt,
1174
+ max_coarse_history=max_coarse_history,
1175
+ semantic_to_coarse_ratio=semantic_to_coarse_ratio,
1176
+ batch_size=batch_size,
1177
+ semantic_generation_config=semantic_generation_config,
1178
+ codebook_size=codebook_size,
1179
+ )
1180
+ base_semantic_idx = x_semantic_history.shape[1]
1181
+
1182
+ semantic_output = torch.hstack([x_semantic_history, semantic_output])
1183
+
1184
+ n_window_steps = int(np.ceil(max_generated_len / sliding_window_len))
1185
+
1186
+ total_generated_len = 0
1187
+
1188
+ len_coarse_history = x_coarse.shape[1]
1189
+
1190
+ for _ in range(n_window_steps):
1191
+ semantic_idx = base_semantic_idx + int(round(total_generated_len / semantic_to_coarse_ratio))
1192
+
1193
+ # pad from right side
1194
+ input_coarse = semantic_output[:, np.max([0, semantic_idx - max_semantic_history]) :]
1195
+ input_coarse = input_coarse[:, :max_coarse_input_length]
1196
+ input_coarse = F.pad(
1197
+ input_coarse,
1198
+ (0, max_coarse_input_length - input_coarse.shape[-1]),
1199
+ "constant",
1200
+ coarse_generation_config.coarse_semantic_pad_token,
1201
+ )
1202
+
1203
+ input_coarse = torch.hstack(
1204
+ [
1205
+ input_coarse,
1206
+ torch.tensor([[coarse_generation_config.coarse_infer_token]] * batch_size).to(self.device),
1207
+ x_coarse[:, -max_coarse_history:],
1208
+ ]
1209
+ )
1210
+
1211
+ alternatingLogitsProcessor = AlternatingCodebooksLogitsProcessor(
1212
+ input_coarse.shape[1],
1213
+ semantic_generation_config.semantic_vocab_size,
1214
+ codebook_size,
1215
+ )
1216
+
1217
+ output_coarse = super().generate(
1218
+ input_coarse,
1219
+ logits_processor=[alternatingLogitsProcessor],
1220
+ max_new_tokens=min(sliding_window_len, max_generated_len - total_generated_len),
1221
+ generation_config=coarse_generation_config,
1222
+ **kwargs,
1223
+ )
1224
+
1225
+ input_coarse_len = input_coarse.shape[1]
1226
+
1227
+ x_coarse = torch.hstack([x_coarse, output_coarse[:, input_coarse_len:]])
1228
+ total_generated_len = x_coarse.shape[1] - len_coarse_history
1229
+
1230
+ del output_coarse
1231
+
1232
+ coarse_output = x_coarse[:, len_coarse_history:]
1233
+
1234
+ if return_output_lengths:
1235
+ return coarse_output, output_lengths
1236
+
1237
+ return coarse_output
1238
+
1239
+
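# --- Illustrative sketch (annotation, not part of the committed file) ---
# Expected coarse output length for a given number of semantic tokens, using the formulas
# above with Bark's usual rates (75 Hz coarse, 49.9 Hz semantic, 2 coarse codebooks) as an
# assumption about the generation configs:
import numpy as np

coarse_rate_hz, semantic_rate_hz, n_coarse_codebooks = 75, 49.9, 2
semantic_to_coarse_ratio = coarse_rate_hz / semantic_rate_hz * n_coarse_codebooks  # ~3.006
n_semantic_tokens = 500
n_coarse_tokens = int(round(np.floor(n_semantic_tokens * semantic_to_coarse_ratio / n_coarse_codebooks) * n_coarse_codebooks))
print(n_coarse_tokens)  # 1502 coarse tokens (two interleaved codebooks)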
1240
+ @add_start_docstrings(
1241
+ """Bark fine acoustics model. It is a non-causal GPT-like model with `config.n_codes_total` embedding layers and
1242
+ language modeling heads, one for each codebook.""",
1243
+ BARK_MODEL_START_DOCSTRING.format(config="BarkFineConfig"),
1244
+ )
1245
+ class BarkFineModel(BarkPreTrainedModel):
1246
+ base_model_prefix = "fine_acoustics"
1247
+ config_class = BarkFineConfig
1248
+ main_input_name = "codebook_idx"
1249
+
1250
+ def __init__(self, config):
1251
+ # non-causal gpt-like model with one embedding layer and one lm_head for each codebook of Encodec
1252
+ super().__init__(config)
1253
+ self.config = config
1254
+
1255
+ # initialize a modified non causal GPT-like model
1256
+ # note that there is one embedding layer and one lm_head for each codebook of Encodec
1257
+ self.input_embeds_layers = nn.ModuleList(
1258
+ [nn.Embedding(config.input_vocab_size, config.hidden_size) for _ in range(config.n_codes_total)]
1259
+ )
1260
+ self.position_embeds_layer = nn.Embedding(config.block_size, config.hidden_size)
1261
+
1262
+ self.drop = nn.Dropout(config.dropout)
1263
+
1264
+ self.layers = nn.ModuleList([BarkBlock(config, is_causal=False) for _ in range(config.num_layers)])
1265
+ self._use_flash_attention_2 = config._attn_implementation == "flash_attention_2"
1266
+
1267
+ self.layernorm_final = nn.LayerNorm(config.hidden_size)
1268
+
1269
+ self.lm_heads = nn.ModuleList(
1270
+ [
1271
+ nn.Linear(config.hidden_size, config.output_vocab_size, bias=False)
1272
+ for _ in range(config.n_codes_given, config.n_codes_total)
1273
+ ]
1274
+ )
1275
+ self.gradient_checkpointing = False
1276
+ self.n_codes_total = config.n_codes_total
1277
+
1278
+ # Initialize weights and apply final processing
1279
+ self.post_init()
1280
+
1281
+ def get_input_embeddings(self):
1282
+ # one embedding layer for each codebook
1283
+ return self.input_embeds_layers
1284
+
1285
+ def set_input_embeddings(self, new_embeddings):
1286
+ # one embedding layer for each codebook
1287
+ self.input_embeds_layers = new_embeddings
1288
+
1289
+ def get_output_embeddings(self):
1290
+ # one lm_head for each codebook
1291
+ return self.lm_heads
1292
+
1293
+ def set_output_embeddings(self, new_output_embeddings):
1294
+ # one lm_head for each codebook
1295
+ self.lm_heads = new_output_embeddings
1296
+
1297
+ def _resize_token_embeddings(self, new_num_tokens, pad_to_multiple_of=None):
1298
+ old_embeddings_list = self.get_input_embeddings()
1299
+ new_embeddings_list = nn.ModuleList(
1300
+ [
1301
+ self._get_resized_embeddings(old_embeddings, new_num_tokens, pad_to_multiple_of)
1302
+ for old_embeddings in old_embeddings_list
1303
+ ]
1304
+ )
1305
+ self.set_input_embeddings(new_embeddings_list)
1306
+ new_num_tokens = new_embeddings_list[0].weight.shape[0]
1307
+
1308
+ # if word embeddings are not tied, make sure that lm head is resized as well
1309
+ if self.get_output_embeddings() is not None and not self.config.tie_word_embeddings:
1310
+ old_lm_head_list = self.get_output_embeddings()
1311
+ new_lm_head_list = nn.ModuleList(
1312
+ [self._get_resized_lm_head(old_lm_head, new_num_tokens) for old_lm_head in old_lm_head_list]
1313
+ )
1314
+ self.set_output_embeddings(new_lm_head_list)
1315
+
1316
+ return self.get_input_embeddings()
1317
+
1318
+ def resize_token_embeddings(
1319
+ self, new_num_tokens: Optional[int] = None, pad_to_multiple_of: Optional[int] = None
1320
+ ) -> nn.Embedding:
1321
+ """
1322
+ Resizes input token embeddings matrix of the model if `new_num_tokens != config.vocab_size`.
1323
+
1324
+ Takes care of tying weights embeddings afterwards if the model class has a `tie_weights()` method.
1325
+
1326
+ Arguments:
1327
+ new_num_tokens (`int`, *optional*):
1328
+ The number of new tokens in the embedding matrix. Increasing the size will add newly initialized
1329
+ vectors at the end. Reducing the size will remove vectors from the end. If not provided or `None`, just
1330
+ returns a pointer to the input tokens `torch.nn.Embedding` module of the model without doing anything.
1331
+ pad_to_multiple_of (`int`, *optional*):
1332
+ If set will pad the embedding matrix to a multiple of the provided value.
1333
+
1334
+ This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability
1335
+ `>= 7.5` (Volta), or on TPUs which benefit from having sequence lengths be a multiple of 128. For more
1336
+ details about this, or help on choosing the correct value for resizing, refer to this guide:
1337
+ https://docs.nvidia.com/deeplearning/performance/dl-performance-matrix-multiplication/index.html#requirements-tc
1338
+
1339
+ Return:
1340
+ `torch.nn.Embedding`: Pointer to the input tokens Embeddings Module of the model.
1341
+ """
1342
+ model_embeds = self._resize_token_embeddings(new_num_tokens, pad_to_multiple_of)
1343
+ if new_num_tokens is None and pad_to_multiple_of is None:
1344
+ return model_embeds
1345
+
1346
+ # Update base model and current model config
1347
+ self.config.output_vocab_size = model_embeds[0].weight.shape[0]
1348
+ self.config.vocab_size = model_embeds[0].weight.shape[0]
1349
+ self.output_vocab_size = model_embeds[0].weight.shape[0]
1350
+ self.vocab_size = model_embeds[0].weight.shape[0]
1351
+
1352
+ # Tie weights again if needed
1353
+ self.tie_weights()
1354
+
1355
+ return model_embeds
1356
+
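# --- Illustrative usage sketch (annotation, not part of the committed file) ---
# `resize_token_embeddings` resizes every per-codebook embedding (and re-ties the lm_heads);
# `pad_to_multiple_of` pads the vocabulary to a hardware-friendly size (values are assumptions):
fine_model = model.fine_acoustics          # assuming `model` is a loaded BarkModel
embeddings = fine_model.resize_token_embeddings(pad_to_multiple_of=128)
print(embeddings[0].weight.shape[0])       # padded vocab size, shared by all codebook embeddings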
1357
+ def tie_weights(self):
1358
+ """
1359
+ Tie the weights between the input embeddings list and the output embeddings list.
1360
+
1361
+ If the `torchscript` flag is set in the configuration, can't handle parameter sharing so we are cloning the
1362
+ weights instead.
1363
+ """
1364
+ if getattr(self.config, "tie_word_embeddings", True):
1365
+ self._tied_weights_keys = []
1366
+ output_embeddings = self.get_output_embeddings()
1367
+ input_embeddings = self.get_input_embeddings()
1368
+
1369
+ for i in range(self.config.n_codes_total - self.config.n_codes_given):
1370
+ # self.input_embeds_layers[i + 1].weight = self.lm_heads[i].weight
1371
+ self._tie_or_clone_weights(output_embeddings[i], input_embeddings[i + 1])
1372
+ self._tied_weights_keys.append(f"lm_heads.{i}.weight")
1373
+
1374
+ for module in self.modules():
1375
+ if hasattr(module, "_tie_weights"):
1376
+ module._tie_weights()
1377
+
1378
+ @add_start_docstrings_to_model_forward(BARK_FINE_INPUTS_DOCSTRING)
1379
+ def forward(
1380
+ self,
1381
+ codebook_idx: int, # an additional idx corresponding to the id of the codebook that will be predicted
1382
+ input_ids: Optional[torch.Tensor] = None,
1383
+ attention_mask: Optional[torch.Tensor] = None,
1384
+ position_ids: Optional[torch.Tensor] = None,
1385
+ head_mask: Optional[torch.Tensor] = None,
1386
+ labels: Optional[torch.LongTensor] = None,
1387
+ input_embeds: Optional[torch.Tensor] = None,
1388
+ output_attentions: Optional[bool] = None,
1389
+ output_hidden_states: Optional[bool] = None,
1390
+ return_dict: Optional[bool] = None,
1391
+ ) -> Union[Tuple[torch.Tensor], MaskedLMOutput]:
1392
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1393
+ output_hidden_states = (
1394
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1395
+ )
1396
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1397
+
1398
+ if codebook_idx == 0:
1399
+ raise ValueError("Cannot predict 0th codebook - 0th codebook should be predicted by the coarse model")
1400
+
1401
+ if input_ids is not None and input_embeds is not None:
1402
+ raise ValueError("You cannot specify both input_ids and input_embeds at the same time")
1403
+
1404
+ if input_ids is None and input_embeds is None:
1405
+ raise ValueError("You have to specify either input_ids or input_embeds")
1406
+
1407
+ if input_ids is not None:
1408
+ # the input_embeddings are the sum of the j previous codebooks embeddings before
1409
+ # the current codebook_idx codebook
1410
+
1411
+ # forward the GPT model itself
1412
+ input_embeds = [
1413
+ input_embeds_layer(input_ids[:, :, i]).unsqueeze(-1)
1414
+ for i, input_embeds_layer in enumerate(self.input_embeds_layers)
1415
+ ] # token embeddings of shape (b, t, n_embd)
1416
+ input_embeds = torch.cat(input_embeds, dim=-1)
1417
+ input_embeds = input_embeds[:, :, :, : codebook_idx + 1].sum(dim=-1)
1418
+
1419
+ input_shape = input_embeds.size()[:-1]
1420
+ batch_size = input_embeds.shape[0]
1421
+ seq_length = input_shape[1]
1422
+
1423
+ device = input_ids.device if input_ids is not None else input_embeds.device
1424
+
1425
+ if position_ids is None:
1426
+ position_ids = torch.arange(0, seq_length, dtype=torch.long, device=device)
1427
+ position_ids = position_ids.unsqueeze(0) # shape (1, seq_length)
1428
+
1429
+ position_embeds = self.position_embeds_layer(position_ids) # position embeddings of shape (1, t, n_embd)
1430
+
1431
+ # Attention mask.
1432
+ if attention_mask is not None:
1433
+ if batch_size <= 0:
1434
+ raise ValueError("batch_size has to be defined and > 0")
1435
+ if self._use_flash_attention_2:
1436
+ attention_mask = attention_mask if 0 in attention_mask else None
1437
+ else:
1438
+ # [bsz, to_seq_length] -> [bsz, 1, 1, to_seq_length]
1439
+ # from_seq_length is 1 to easily broadcast
1440
+ attention_mask = _prepare_4d_attention_mask(attention_mask, input_embeds.dtype, tgt_len=1)
1441
+
1442
+ head_mask = self.get_head_mask(head_mask, self.config.num_layers)
1443
+
1444
+ hidden_states = self.drop(input_embeds + position_embeds)
1445
+ output_shape = input_shape + (hidden_states.size(-1),)
1446
+
1447
+ all_self_attentions = () if output_attentions else None
1448
+ all_hidden_states = () if output_hidden_states else None
1449
+
1450
+ for i, block in enumerate(self.layers):
1451
+ if output_hidden_states:
1452
+ all_hidden_states = all_hidden_states + (hidden_states,)
1453
+
1454
+ outputs = block(
1455
+ hidden_states,
1456
+ attention_mask=attention_mask,
1457
+ head_mask=head_mask[i],
1458
+ output_attentions=output_attentions,
1459
+ )
1460
+
1461
+ hidden_states = outputs[0]
1462
+
1463
+ if output_attentions:
1464
+ all_self_attentions = all_self_attentions + (outputs[1],)
1465
+
1466
+ hidden_states = self.layernorm_final(hidden_states)
1467
+ hidden_states = hidden_states.view(output_shape)
1468
+
1469
+ # Add last hidden state
1470
+ if output_hidden_states:
1471
+ all_hidden_states = all_hidden_states + (hidden_states,)
1472
+
1473
+ logits = self.lm_heads[codebook_idx - self.config.n_codes_given](hidden_states)
1474
+
1475
+ loss = None
1476
+ if labels is not None:
1477
+ raise NotImplementedError("Training is not implemented yet")
1478
+
1479
+ if not return_dict:
1480
+ return tuple(v for v in [None, logits, all_hidden_states, all_self_attentions] if v is not None)
1481
+
1482
+ return MaskedLMOutput(
1483
+ loss=loss,
1484
+ logits=logits,
1485
+ hidden_states=all_hidden_states,
1486
+ attentions=all_self_attentions,
1487
+ )
1488
+
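# --- Illustrative sketch (annotation, not part of the committed file) ---
# The fine model embeds its input as the sum of the embeddings of all codebook channels up
# to (and including) the one being predicted, mirroring the forward pass above (toy sizes):
import torch
import torch.nn as nn

n_codes_total, vocab_size, hidden_size = 8, 1056, 16
embed_layers = nn.ModuleList([nn.Embedding(vocab_size, hidden_size) for _ in range(n_codes_total)])
input_ids = torch.randint(0, vocab_size, (2, 10, n_codes_total))  # (batch, seq_len, n_codes_total)
codebook_idx = 3
stacked = torch.cat(
    [layer(input_ids[:, :, i]).unsqueeze(-1) for i, layer in enumerate(embed_layers)], dim=-1
)
input_embeds = stacked[:, :, :, : codebook_idx + 1].sum(dim=-1)   # (batch, seq_len, hidden_size)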
1489
+ def generate(
1490
+ self,
1491
+ coarse_output: torch.Tensor,
1492
+ semantic_generation_config: BarkSemanticGenerationConfig = None,
1493
+ coarse_generation_config: BarkCoarseGenerationConfig = None,
1494
+ fine_generation_config: BarkFineGenerationConfig = None,
1495
+ codebook_size: int = 1024,
1496
+ history_prompt: Optional[Dict[str, torch.Tensor]] = None,
1497
+ **kwargs,
1498
+ ) -> torch.LongTensor:
1499
+ """
1500
+ Generates fine acoustics tokens from input coarse acoustics tokens and an additional optional `Bark` speaker
1501
+ prompt.
1502
+
1503
+ Args:
1504
+ coarse_output (`torch.Tensor` of shape (batch_size, seq_len)):
1505
+ Input coarse acoustics ids, i.e. the output of `BarkCoarseModel.generate`.
1506
+ semantic_generation_config (`BarkSemanticGenerationConfig`):
1507
+ Generation config indicating how to generate the semantic tokens.
1508
+ coarse_generation_config (`BarkCoarseGenerationConfig`):
1509
+ Generation config indicating how to generate the coarse tokens.
1510
+ fine_generation_config (`BarkFineGenerationConfig`):
1511
+ Generation config indicating how to generate the fine tokens.
1512
+ codebook_size (`int`, *optional*, defaults to 1024):
1513
+ Codebook channel size, i.e. the size of the output vocabulary per codebook channel.
1514
+ history_prompt (`Optional[Dict[str,torch.Tensor]]`, *optional*):
1515
+ Optional `Bark` speaker prompt.
1516
+ Returns:
1517
+ torch.LongTensor: Output fine acoustics tokens.
1518
+ """
1519
+ if semantic_generation_config is None:
1520
+ raise ValueError("`semantic_generation_config` has to be provided")
1521
+
1522
+ if coarse_generation_config is None:
1523
+ raise ValueError("`coarse_generation_config` has to be provided")
1524
+
1525
+ if fine_generation_config is None:
1526
+ raise ValueError("`fine_generation_config` has to be provided")
1527
+
1528
+ # since we don't really use GenerationConfig through the fine model (autoencoder)
1529
+ # and since only temperature is used from the classic GenerationConfig parameters
1530
+ # manually impose the kwargs priority over the generation config
1531
+ temperature = kwargs.get("temperature", fine_generation_config.temperature)
1532
+
1533
+ max_fine_history_length = fine_generation_config.max_fine_history_length
1534
+ max_fine_input_length = fine_generation_config.max_fine_input_length
1535
+
1536
+ # shape: (batch, n_coarse_codebooks * seq_len)
1537
+ # new_shape: (batch, seq_len, n_coarse_codebooks)
1538
+ coarse_output = coarse_output.view(coarse_output.shape[0], -1, coarse_generation_config.n_coarse_codebooks)
1539
+
1540
+ # brings ids into the range [0, codebook_size -1]
1541
+ coarse_output = torch.remainder(coarse_output - semantic_generation_config.semantic_vocab_size, codebook_size)
1542
+ batch_size = coarse_output.shape[0]
1543
+
1544
+ if history_prompt is not None:
1545
+ x_fine_history = torch.repeat_interleave(history_prompt["fine_prompt"].T[None], batch_size, dim=0)
1546
+ # transpose to get to shape (seq_len, n_fine_codebooks)
1547
+ else:
1548
+ x_fine_history = None
1549
+
1550
+ n_coarse = coarse_generation_config.n_coarse_codebooks
1551
+
1552
+ # pad the remaining fine codebook channels (from n_coarse up to n_fine_codebooks) with codebook_size
1553
+ fine_input = F.pad(
1554
+ coarse_output,
1555
+ (0, fine_generation_config.n_fine_codebooks - n_coarse),
1556
+ "constant",
1557
+ codebook_size,
1558
+ )
1559
+
1560
+ # prepend history if available (max max_fine_history_length)
1561
+ if x_fine_history is not None:
1562
+ fine_input = torch.cat([x_fine_history[:, -max_fine_history_length:, :], fine_input], dim=1)
1563
+
1564
+ # len of the fine_history that has been added to fine_input
1565
+ n_history = x_fine_history[:, -max_fine_history_length:, :].shape[1]
1566
+ else:
1567
+ n_history = 0
1568
+
1569
+ n_remove_from_end = 0
1570
+ # need to pad if too short (since non-causal model)
1571
+ if fine_input.shape[1] < max_fine_input_length:
1572
+ n_remove_from_end = max_fine_input_length - fine_input.shape[1]
1573
+ fine_input = F.pad(fine_input, (0, 0, 0, n_remove_from_end), mode="constant", value=codebook_size)
1574
+
1575
+ # we can be lazy about fractional loop and just keep overwriting codebooks.
1576
+ # seems that coarse_output.shape[1] - (max_fine_input_length - n_history) is equal to minus n_remove_from_end
1577
+ # So if we needed to pad because too short, n_loops is always 1 (because n_remove_from_end > 0)
1578
+ # If not, we loop over at least twice.
1579
+
1580
+ n_loops = (coarse_output.shape[1] - (max_fine_input_length - n_history)) / max_fine_history_length
1581
+ n_loops = int(np.ceil(n_loops))
1582
+ n_loops = max(0, n_loops) + 1
1583
+
1584
+ for n_outer in range(n_loops):
1585
+ start_idx = min([n_outer * max_fine_history_length, fine_input.shape[1] - max_fine_input_length])
1586
+
1587
+ start_fill_idx = min(
1588
+ [n_history + n_outer * max_fine_history_length, fine_input.shape[1] - max_fine_history_length]
1589
+ )
1590
+ rel_start_fill_idx = start_fill_idx - start_idx
1591
+ input_buffer = fine_input[:, start_idx : start_idx + max_fine_input_length, :]
1592
+ for n_inner in range(n_coarse, fine_generation_config.n_fine_codebooks):
1593
+ logits = self.forward(n_inner, input_buffer).logits
1594
+ if temperature is None or temperature == 1.0:
1595
+ relevant_logits = logits[:, rel_start_fill_idx:, :codebook_size]
1596
+ codebook_preds = torch.argmax(relevant_logits, -1)
1597
+ else:
1598
+ relevant_logits = logits[:, :, :codebook_size] / temperature
1599
+ # apply softmax
1600
+ probs = F.softmax(relevant_logits, dim=-1)[:, rel_start_fill_idx:max_fine_input_length]
1601
+ # reshape to 2D: (batch_size, seq_len, codebook_size) -> (batch_size*seq_len, codebook_size)
1602
+ probs = probs.reshape((-1, codebook_size))
1603
+ # multinomial then reshape : (batch_size*seq_len)-> (batch_size,seq_len)
1604
+ codebook_preds = torch.multinomial(probs, num_samples=1).view(batch_size, -1)
1605
+ codebook_preds = codebook_preds.to(torch.int32)
1606
+ input_buffer[:, rel_start_fill_idx:, n_inner] = codebook_preds
1607
+ del logits, codebook_preds
1608
+
1609
+ # transfer into fine_input
1610
+ for n_inner in range(n_coarse, fine_generation_config.n_fine_codebooks):
1611
+ fine_input[
1612
+ :, start_fill_idx : start_fill_idx + (max_fine_input_length - rel_start_fill_idx), n_inner
1613
+ ] = input_buffer[:, rel_start_fill_idx:, n_inner]
1614
+ del input_buffer
1615
+
1616
+ fine_input = fine_input.transpose(1, 2)[:, :, n_history:]
1617
+ if n_remove_from_end > 0:
1618
+ fine_input = fine_input[:, :, :-n_remove_from_end]
1619
+
1620
+ if fine_input.shape[-1] != coarse_output.shape[-2]:
1621
+ raise ValueError("input and output should have the same seq_len")
1622
+
1623
+ return fine_input
1624
+
1625
+
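# --- Illustrative sketch (annotation, not part of the committed file) ---
# The coarse tokens arrive interleaved as (batch, seq_len * n_coarse_codebooks); the fine
# stage folds them back per codebook and maps the ids into [0, codebook_size - 1], undoing
# the offsets applied by the coarse stage (same toy values as in the earlier sketch):
import torch

codebook_size, semantic_vocab_size, n_coarse_codebooks = 1024, 10_000, 2
coarse_output = torch.tensor([[10003, 11029, 10007, 11033]])                        # (batch, seq*n_coarse)
coarse_output = coarse_output.view(coarse_output.shape[0], -1, n_coarse_codebooks)  # (batch, seq, n_coarse)
coarse_output = torch.remainder(coarse_output - semantic_vocab_size, codebook_size)
print(coarse_output)  # tensor([[[3, 5], [7, 9]]])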
1626
+ @add_start_docstrings(
1627
+ """
1628
+ The full Bark model, a text-to-speech model composed of 4 sub-models:
1629
+ - [`BarkSemanticModel`] (also referred to as the 'text' model): a causal auto-regressive transformer model that
1630
+ takes
1631
+ as input tokenized text, and predicts semantic text tokens that capture the meaning of the text.
1632
+ - [`BarkCoarseModel`] (also referred to as the 'coarse acoustics' model), also a causal autoregressive transformer,
1633
+ that takes as input the results of the previous model. It aims at regressing the first two audio codebooks necessary
1634
+ to `encodec`.
1635
+ - [`BarkFineModel`] (the 'fine acoustics' model), this time a non-causal autoencoder transformer, which iteratively
1636
+ predicts the last codebooks based on the sum of the previous codebooks embeddings.
1637
+ - having predicted all the codebook channels from the [`EncodecModel`], Bark uses it to decode the output audio
1638
+ array.
1639
+
1640
+ It should be noted that each of the first three modules can support conditional speaker embeddings to condition the
1641
+ output sound according to a specific predefined voice.
1642
+ """,
1643
+ BARK_START_DOCSTRING,
1644
+ )
1645
+ class BarkModel(BarkPreTrainedModel):
1646
+ config_class = BarkConfig
1647
+
1648
+ def __init__(self, config):
1649
+ super().__init__(config)
1650
+
1651
+ self.semantic = BarkSemanticModel(config.semantic_config)
1652
+ self.coarse_acoustics = BarkCoarseModel(config.coarse_acoustics_config)
1653
+ self.fine_acoustics = BarkFineModel(config.fine_acoustics_config)
1654
+
1655
+ self.codec_model = AutoModel.from_config(config.codec_config)
1656
+
1657
+ self.config = config
1658
+
1659
+ @property
1660
+ def device(self) -> torch.device:
1661
+ """
1662
+ `torch.device`: The device on which the module is (assuming that all the module parameters are on the same
1663
+ device).
1664
+ """
1665
+ # for bark_model, device must be verified on its sub-models
1666
+ # if has _hf_hook, has been offloaded so the device has to be found in the hook
1667
+ if not hasattr(self.semantic, "_hf_hook"):
1668
+ return get_parameter_device(self)
1669
+ for module in self.semantic.modules():
1670
+ if (
1671
+ hasattr(module, "_hf_hook")
1672
+ and hasattr(module._hf_hook, "execution_device")
1673
+ and module._hf_hook.execution_device is not None
1674
+ ):
1675
+ return torch.device(module._hf_hook.execution_device)
1676
+
1677
+ def enable_cpu_offload(self, gpu_id: Optional[int] = 0):
1678
+ r"""
1679
+ Offloads all sub-models to CPU using accelerate, reducing memory usage with a low impact on performance. This
1680
+ method moves one whole sub-model at a time to the GPU when it is used, and the sub-model remains on the GPU until
1681
+ the next sub-model runs.
1682
+
1683
+ Args:
1684
+ gpu_id (`int`, *optional*, defaults to 0):
1685
+ GPU id on which the sub-models will be loaded and offloaded.
1686
+ """
1687
+ if is_accelerate_available():
1688
+ from accelerate import cpu_offload_with_hook
1689
+ else:
1690
+ raise ImportError("`enable_model_cpu_offload` requires `accelerate`.")
1691
+
1692
+ device = torch.device(f"cuda:{gpu_id}")
1693
+
1694
+ if self.device.type != "cpu":
1695
+ self.to("cpu")
1696
+ torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
1697
+
1698
+ # this layer is used outside the first forward pass of the semantic model, so it needs to be loaded before it
1699
+ self.semantic.input_embeds_layer, _ = cpu_offload_with_hook(self.semantic.input_embeds_layer, device)
1700
+
1701
+ hook = None
1702
+ for cpu_offloaded_model in [
1703
+ self.semantic,
1704
+ self.coarse_acoustics,
1705
+ self.fine_acoustics,
1706
+ ]:
1707
+ _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
1708
+
1709
+ self.fine_acoustics_hook = hook
1710
+
1711
+ _, hook = cpu_offload_with_hook(self.codec_model, device, prev_module_hook=hook)
1712
+
1713
+ # We'll offload the last model manually.
1714
+ self.codec_model_hook = hook
1715
+
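# --- Illustrative usage sketch (annotation, not part of the committed file) ---
# With CPU offloading enabled, only the sub-model currently generating sits on the GPU
# (requires the `accelerate` package; checkpoint name and prompt are assumptions):
from transformers import AutoProcessor, BarkModel

model = BarkModel.from_pretrained("suno/bark-small")
model.enable_cpu_offload()  # defaults to cuda:0

processor = AutoProcessor.from_pretrained("suno/bark-small")
inputs = processor("An example sentence.")
audio = model.generate(**inputs)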
1716
+ def codec_decode(self, fine_output, output_lengths=None):
1717
+ """Turn quantized audio codes into audio array using encodec."""
1718
+
1719
+ fine_output = fine_output.transpose(0, 1)
1720
+ emb = self.codec_model.quantizer.decode(fine_output)
1721
+
1722
+ if output_lengths is not None:
1723
+ # encodec uses LSTMs which behave differently with appended padding
1724
+ # decoding with encodec takes around 0.1% of the total generation time
1725
+ # to keep generation quality, we break batching
1726
+ out = [sample[:, :l].unsqueeze(0) for (sample, l) in zip(emb, output_lengths)]
1727
+ audio_arr = [self.codec_model.decoder(sample).squeeze() for sample in out]
1728
+ else:
1729
+ out = self.codec_model.decoder(emb)
1730
+ audio_arr = out.squeeze(1) # squeeze the codebook dimension
1731
+
1732
+ return audio_arr
1733
+
1734
+ @torch.no_grad()
1735
+ def generate(
1736
+ self,
1737
+ input_ids: Optional[torch.Tensor] = None,
1738
+ history_prompt: Optional[Dict[str, torch.Tensor]] = None,
1739
+ return_output_lengths: Optional[bool] = None,
1740
+ **kwargs,
1741
+ ) -> torch.LongTensor:
1742
+ """
1743
+ Generates audio from an input prompt and an additional optional `Bark` speaker prompt.
1744
+
1745
+ Args:
1746
+ input_ids (`Optional[torch.Tensor]` of shape (batch_size, seq_len), *optional*):
1747
+ Input ids. Will be truncated up to 256 tokens. Note that the output audios will be as long as the
1748
+ longest generation among the batch.
1749
+ history_prompt (`Optional[Dict[str,torch.Tensor]]`, *optional*):
1750
+ Optional `Bark` speaker prompt. Note that for now, this model takes only one speaker prompt per batch.
1751
+ kwargs (*optional*): Remaining dictionary of keyword arguments. Keyword arguments are of two types:
1752
+
1753
+ - Without a prefix, they will be entered as `**kwargs` for the `generate` method of each sub-model.
1754
+ - With a *semantic_*, *coarse_*, *fine_* prefix, they will be input for the `generate` method of the
1755
+ semantic, coarse and fine sub-models respectively. They take priority over the keywords without a prefix.
1756
+
1757
+ This means you can, for example, specify a generation strategy for all sub-models except one.
1758
+ return_output_lengths (`bool`, *optional*):
1759
+ Whether or not to return the waveform lengths. Useful when batching.
1760
+ Returns:
1761
+ By default:
1762
+ - **audio_waveform** (`torch.Tensor` of shape (batch_size, seq_len)): Generated audio waveform.
1763
+ When `return_output_lengths=True`:
1764
+ Returns a tuple made of:
1765
+ - **audio_waveform** (`torch.Tensor` of shape (batch_size, seq_len)): Generated audio waveform.
1766
+ - **output_lengths** (`torch.Tensor` of shape (batch_size)): The length of each waveform in the batch
1767
+ Example:
1768
+
1769
+ ```python
1770
+ >>> from transformers import AutoProcessor, BarkModel
1771
+
1772
+ >>> processor = AutoProcessor.from_pretrained("suno/bark-small")
1773
+ >>> model = BarkModel.from_pretrained("suno/bark-small")
1774
+
1775
+ >>> # To add a voice preset, you can pass `voice_preset` to `BarkProcessor.__call__(...)`
1776
+ >>> voice_preset = "v2/en_speaker_6"
1777
+
1778
+ >>> inputs = processor("Hello, my dog is cute, I need him in my life", voice_preset=voice_preset)
1779
+
1780
+ >>> audio_array = model.generate(**inputs, semantic_max_new_tokens=100)
1781
+ >>> audio_array = audio_array.cpu().numpy().squeeze()
1782
+ ```
1783
+ """
1784
+ # TODO (joao): workaround until nested generation configs are compatible with PreTrainedModel
1785
+ # todo: dict
1786
+ semantic_generation_config = BarkSemanticGenerationConfig(**self.generation_config.semantic_config)
1787
+ coarse_generation_config = BarkCoarseGenerationConfig(**self.generation_config.coarse_acoustics_config)
1788
+ fine_generation_config = BarkFineGenerationConfig(**self.generation_config.fine_acoustics_config)
1789
+
1790
+ kwargs_semantic = {
1791
+ # if "attention_mask" is set, it should not be passed to CoarseModel and FineModel
1792
+ "attention_mask": kwargs.pop("attention_mask", None),
1793
+ "min_eos_p": kwargs.pop("min_eos_p", None),
1794
+ }
1795
+ kwargs_coarse = {}
1796
+ kwargs_fine = {}
1797
+ for key, value in kwargs.items():
1798
+ if key.startswith("semantic_"):
1799
+ key = key[len("semantic_") :]
1800
+ kwargs_semantic[key] = value
1801
+ elif key.startswith("coarse_"):
1802
+ key = key[len("coarse_") :]
1803
+ kwargs_coarse[key] = value
1804
+ elif key.startswith("fine_"):
1805
+ key = key[len("fine_") :]
1806
+ kwargs_fine[key] = value
1807
+ else:
1808
+ # If the key is already in a specific config, then it's been set with a
1809
+ # sub-model specific value and we don't override it
1810
+ if key not in kwargs_semantic:
1811
+ kwargs_semantic[key] = value
1812
+ if key not in kwargs_coarse:
1813
+ kwargs_coarse[key] = value
1814
+ if key not in kwargs_fine:
1815
+ kwargs_fine[key] = value
1816
+
1817
+ # 1. Generate from the semantic model
1818
+ semantic_output = self.semantic.generate(
1819
+ input_ids,
1820
+ history_prompt=history_prompt,
1821
+ semantic_generation_config=semantic_generation_config,
1822
+ **kwargs_semantic,
1823
+ )
1824
+
1825
+ # 2. Generate from the coarse model
1826
+ coarse_output = self.coarse_acoustics.generate(
1827
+ semantic_output,
1828
+ history_prompt=history_prompt,
1829
+ semantic_generation_config=semantic_generation_config,
1830
+ coarse_generation_config=coarse_generation_config,
1831
+ codebook_size=self.generation_config.codebook_size,
1832
+ return_output_lengths=return_output_lengths,
1833
+ **kwargs_coarse,
1834
+ )
1835
+
1836
+ output_lengths = None
1837
+ if return_output_lengths:
1838
+ coarse_output, output_lengths = coarse_output
1839
+ # (batch_size, seq_len*coarse_codebooks) -> (batch_size, seq_len)
1840
+ output_lengths = output_lengths // coarse_generation_config.n_coarse_codebooks
1841
+
1842
+ # 3. "generate" from the fine model
1843
+ output = self.fine_acoustics.generate(
1844
+ coarse_output,
1845
+ history_prompt=history_prompt,
1846
+ semantic_generation_config=semantic_generation_config,
1847
+ coarse_generation_config=coarse_generation_config,
1848
+ fine_generation_config=fine_generation_config,
1849
+ codebook_size=self.generation_config.codebook_size,
1850
+ **kwargs_fine,
1851
+ )
1852
+
1853
+ if getattr(self, "fine_acoustics_hook", None) is not None:
1854
+ # Manually offload fine_acoustics to CPU
1855
+ # and load codec_model to GPU
1856
+ # since bark doesn't use codec_model forward pass
1857
+ self.fine_acoustics_hook.offload()
1858
+ self.codec_model = self.codec_model.to(self.device)
1859
+
1860
+ # 4. Decode the output and generate audio array
1861
+ audio = self.codec_decode(output, output_lengths)
1862
+
1863
+ if getattr(self, "codec_model_hook", None) is not None:
1864
+ # Offload codec_model to CPU
1865
+ self.codec_model_hook.offload()
1866
+
1867
+ if return_output_lengths:
1868
+ output_lengths = [len(sample) for sample in audio]
1869
+ audio = nn.utils.rnn.pad_sequence(audio, batch_first=True, padding_value=0)
1870
+ return audio, output_lengths
1871
+
1872
+ return audio
1873
+
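# --- Illustrative sketch (annotation, not part of the committed file) ---
# Keyword arguments prefixed with `semantic_`, `coarse_` or `fine_` are routed to the
# corresponding sub-model and take precedence over un-prefixed ones (reusing `model` and
# `inputs` from the docstring example above; the temperatures are arbitrary):
audio_array = model.generate(
    **inputs,
    do_sample=True,            # forwarded to every sub-model's generate
    temperature=0.7,           # default for all stages...
    semantic_temperature=0.9,  # ...overridden for the semantic stage only
    semantic_max_new_tokens=100,
)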
1874
+ @classmethod
1875
+ def _check_and_enable_flash_attn_2(
1876
+ cls,
1877
+ config,
1878
+ torch_dtype: Optional[torch.dtype] = None,
1879
+ device_map: Optional[Union[str, Dict[str, int]]] = None,
1880
+ hard_check_only: bool = False,
1881
+ check_device_map: bool = False,
1882
+ ):
1883
+ """
1884
+ `_check_and_enable_flash_attn_2` originally does not expand flash attention enabling to the model
1885
+ sub-configurations. We override the original method to make sure that Bark sub-models are using Flash Attention
1886
+ if necessary.
1887
+
1888
+ If you don't know about Flash Attention, check out the official repository of flash attention:
1889
+ https://github.com/Dao-AILab/flash-attention
1890
+
1891
+ For using Flash Attention 1.0 you can do it directly via the `BetterTransformer` API, have a look at this
1892
+ specific section of the documentation to learn more about it:
1893
+ https://huggingface.co/docs/transformers/main/en/perf_infer_gpu_one#decoder-models
1894
+
1895
+ The method checks if the current setup is compatible with Flash Attention as it requires the model to be in
1896
+ half precision and not run on the CPU.
1897
+
1898
+ If all checks pass and `hard_check_only` is False, the method will set the config attribute `_attn_implementation` to "flash_attention_2" so that the model
1899
+ can initialize the correct attention module
1900
+ """
1901
+ config = super()._check_and_enable_flash_attn_2(
1902
+ config, torch_dtype, device_map, hard_check_only=hard_check_only, check_device_map=check_device_map
1903
+ )
1904
+
1905
+ config.semantic_config._attn_implementation = config._attn_implementation
1906
+ config.coarse_acoustics_config._attn_implementation = config._attn_implementation
1907
+ config.fine_acoustics_config._attn_implementation = config._attn_implementation
1908
+ return config
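# --- Illustrative usage sketch (annotation, not part of the committed file) ---
# Because the override above copies `_attn_implementation` into every sub-config, Flash
# Attention 2 can be requested once at load time (needs a GPU and half precision):
import torch
from transformers import BarkModel

model = BarkModel.from_pretrained(
    "suno/bark-small", torch_dtype=torch.float16, attn_implementation="flash_attention_2"
).to("cuda")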
llmeval-env/lib/python3.10/site-packages/transformers/models/code_llama/__init__.py ADDED
@@ -0,0 +1,57 @@
1
+ # Copyright 2023 MetaAI and The HuggingFace Inc. team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available
17
+
18
+
19
+ _import_structure = {}
20
+
21
+ try:
22
+ if not is_sentencepiece_available():
23
+ raise OptionalDependencyNotAvailable()
24
+ except OptionalDependencyNotAvailable:
25
+ pass
26
+ else:
27
+ _import_structure["tokenization_code_llama"] = ["CodeLlamaTokenizer"]
28
+
29
+ try:
30
+ if not is_tokenizers_available():
31
+ raise OptionalDependencyNotAvailable()
32
+ except OptionalDependencyNotAvailable:
33
+ pass
34
+ else:
35
+ _import_structure["tokenization_code_llama_fast"] = ["CodeLlamaTokenizerFast"]
36
+
37
+ if TYPE_CHECKING:
38
+ try:
39
+ if not is_sentencepiece_available():
40
+ raise OptionalDependencyNotAvailable()
41
+ except OptionalDependencyNotAvailable:
42
+ pass
43
+ else:
44
+ from .tokenization_code_llama import CodeLlamaTokenizer
45
+
46
+ try:
47
+ if not is_tokenizers_available():
48
+ raise OptionalDependencyNotAvailable()
49
+ except OptionalDependencyNotAvailable:
50
+ pass
51
+ else:
52
+ from .tokenization_code_llama_fast import CodeLlamaTokenizerFast
53
+
54
+ else:
55
+ import sys
56
+
57
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
llmeval-env/lib/python3.10/site-packages/transformers/models/code_llama/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (940 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/code_llama/__pycache__/tokenization_code_llama.cpython-310.pyc ADDED
Binary file (18.6 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/code_llama/__pycache__/tokenization_code_llama_fast.cpython-310.pyc ADDED
Binary file (15.9 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/models/code_llama/tokenization_code_llama.py ADDED
@@ -0,0 +1,509 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 MetaAI and the HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ """Tokenization classes for Code LLaMA."""
18
+ import os
19
+ from shutil import copyfile
20
+ from typing import Any, Dict, List, Optional, Tuple
21
+
22
+ import sentencepiece as spm
23
+
24
+ from ...convert_slow_tokenizer import import_protobuf
25
+ from ...tokenization_utils import AddedToken, PreTrainedTokenizer
26
+ from ...utils import logging, requires_backends
27
+
28
+
29
+ logger = logging.get_logger(__name__)
30
+
31
+ VOCAB_FILES_NAMES = {"vocab_file": "tokenizer.model"}
32
+
33
+ SPIECE_UNDERLINE = "▁"
34
+
35
+ B_INST, E_INST = "[INST]", "[/INST]"
36
+ B_SYS, E_SYS = "<<SYS>>\n", "\n<</SYS>>\n\n"
37
+
38
+ # fmt: off
39
+ DEFAULT_SYSTEM_PROMPT = """You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your \
40
+ answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure \
41
+ that your responses are socially unbiased and positive in nature.
42
+
43
+ If a question does not make any sense, or is not factually coherent, explain why instead of answering something not \
44
+ correct. If you don't know the answer to a question, please don't share false information."""
45
+ # fmt: on
46
+
47
+
48
+ class CodeLlamaTokenizer(PreTrainedTokenizer):
49
+ """
50
+ Construct a CodeLlama tokenizer. Based on byte-level Byte-Pair-Encoding. The default padding token is unset as
51
+ there is no padding token in the original model.
52
+
53
+ The default configuration matches that of
54
+ [codellama/CodeLlama-7b-Instruct-hf](https://huggingface.co/codellama/CodeLlama-7b-Instruct-hf/blob/main/tokenizer_config.json)
55
+ which supports prompt infilling.
56
+
57
+ Args:
58
+ vocab_file (`str`):
59
+ Path to the vocabulary file.
60
+ unk_token (`str`, *optional*, defaults to `"<unk>"`):
61
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
62
+ token instead.
63
+ bos_token (`str`, *optional*, defaults to `"<s>"`):
64
+ The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
65
+ eos_token (`str`, *optional*, defaults to `"</s>"`):
66
+ The end of sequence token.
67
+
68
+ <Tip>
69
+
70
+ When building a sequence using special tokens, this is not the token that is used for the end of sequence.
71
+ The token used is the `sep_token`.
72
+
73
+ </Tip>
74
+
75
+ prefix_token (`str`, *optional*, defaults to `"▁<PRE>"`):
76
+ Prefix token used for infilling.
77
+ middle_token (`str`, *optional*, defaults to `"▁<MID>"`):
78
+ Middle token used for infilling.
79
+ suffix_token (`str`, *optional*, defaults to `"▁<SUF>"`):
80
+ Suffix token used for infilling.
81
+ eot_token (`str`, *optional*, defaults to `"▁<EOT>"`):
82
+ End of text token used for infilling.
83
+ fill_token (`str`, *optional*, defaults to `"<FILL_ME>"`):
84
+ The token used to split the input between the prefix and suffix.
85
+ suffix_first (`bool`, *optional*, defaults to `False`):
86
+ Whether the input prompt and suffix should be formatted with the suffix first.
87
+ sp_model_kwargs (`dict`, *optional*):
88
+ Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for
89
+ SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things,
90
+ to set:
91
+
92
+ - `enable_sampling`: Enable subword regularization.
93
+ - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.
94
+
95
+ - `nbest_size = {0,1}`: No sampling is performed.
96
+ - `nbest_size > 1`: samples from the nbest_size results.
97
+ - `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice)
98
+ using forward-filtering-and-backward-sampling algorithm.
99
+
100
+ - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
101
+ BPE-dropout.
102
+ add_bos_token (`bool`, *optional*, defaults to `True`):
103
+ Whether to add a beginning of sequence token at the start of sequences.
104
+ add_eos_token (`bool`, *optional*, defaults to `False`):
105
+ Whether to add an end of sequence token at the end of sequences.
106
+ clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`):
107
+ Whether or not to clean up the tokenization spaces.
108
+ additional_special_tokens (`List[str]`, *optional*):
109
+ Additional special tokens used by the tokenizer.
110
+ use_default_system_prompt (`bool`, *optional*, defaults to `False`):
111
+ Whether or not the default system prompt for Llama should be used.
112
+ """
113
+
114
+ vocab_files_names = VOCAB_FILES_NAMES
115
+ model_input_names = ["input_ids", "attention_mask"]
116
+
117
+ def __init__(
118
+ self,
119
+ vocab_file,
120
+ unk_token="<unk>",
121
+ bos_token="<s>",
122
+ eos_token="</s>",
123
+ prefix_token="▁<PRE>",
124
+ middle_token="▁<MID>",
125
+ suffix_token="▁<SUF>",
126
+ eot_token="▁<EOT>",
127
+ fill_token="<FILL_ME>",
128
+ suffix_first=False,
129
+ sp_model_kwargs: Optional[Dict[str, Any]] = None,
130
+ add_bos_token=True,
131
+ add_eos_token=False,
132
+ clean_up_tokenization_spaces=False,
133
+ additional_special_tokens=None,
134
+ use_default_system_prompt=False,
135
+ **kwargs,
136
+ ):
137
+ requires_backends(self, "protobuf")
138
+ self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
139
+ bos_token = AddedToken(bos_token, normalized=False, special=True) if isinstance(bos_token, str) else bos_token
140
+ eos_token = AddedToken(eos_token, normalized=False, special=True) if isinstance(eos_token, str) else eos_token
141
+ unk_token = AddedToken(unk_token, normalized=False, special=True) if isinstance(unk_token, str) else unk_token
142
+
143
+ self.use_default_system_prompt = use_default_system_prompt
144
+ # mark tokens special to skip them
145
+ additional_special_tokens = additional_special_tokens or []
146
+ for token in [prefix_token, middle_token, suffix_token, eot_token]:
147
+ additional_special_tokens += [token] if token is not None else []
148
+
149
+ self.vocab_file = vocab_file
150
+ self.add_bos_token = add_bos_token
151
+ self.add_eos_token = add_eos_token
152
+ self._prefix_token = prefix_token
153
+ self._middle_token = middle_token
154
+ self._suffix_token = suffix_token
155
+ self._eot_token = eot_token
156
+ self.fill_token = fill_token
157
+ self.suffix_first = suffix_first
158
+ self.sp_model = self.get_spm_processor()
159
+
160
+ super().__init__(
161
+ bos_token=bos_token,
162
+ eos_token=eos_token,
163
+ unk_token=unk_token,
164
+ add_bos_token=add_bos_token,
165
+ add_eos_token=add_eos_token,
166
+ prefix_token=prefix_token,
167
+ middle_token=middle_token,
168
+ suffix_token=suffix_token,
169
+ eot_token=eot_token,
170
+ fill_token=fill_token,
171
+ sp_model_kwargs=self.sp_model_kwargs,
172
+ suffix_first=suffix_first,
173
+ clean_up_tokenization_spaces=clean_up_tokenization_spaces,
174
+ additional_special_tokens=additional_special_tokens,
175
+ use_default_system_prompt=use_default_system_prompt,
176
+ **kwargs,
177
+ )
178
+
179
+ @property
180
+ def unk_token_length(self):
181
+ return len(self.sp_model.encode(str(self.unk_token)))
182
+
183
+ def get_spm_processor(self):
184
+ tokenizer = spm.SentencePieceProcessor(**self.sp_model_kwargs)
185
+ with open(self.vocab_file, "rb") as f:
186
+ sp_model = f.read()
187
+ model_pb2 = import_protobuf()
188
+ model = model_pb2.ModelProto.FromString(sp_model)
189
+ normalizer_spec = model_pb2.NormalizerSpec()
190
+ normalizer_spec.add_dummy_prefix = False
191
+ model.normalizer_spec.MergeFrom(normalizer_spec)
192
+ sp_model = model.SerializeToString()
193
+ tokenizer.LoadFromSerializedProto(sp_model)
194
+ return tokenizer
195
+
196
+ @property
197
+ def prefix_token(self):
198
+ return self._prefix_token
199
+
200
+ @property
201
+ def prefix_id(self):
202
+ if self._prefix_token is None:
203
+ return None
204
+ return self.convert_tokens_to_ids(self.prefix_token)
205
+
206
+ @property
207
+ def middle_token(self):
208
+ return self._middle_token
209
+
210
+ @property
211
+ def middle_id(self):
212
+ if self._middle_token is None:
213
+ return None
214
+ return self.convert_tokens_to_ids(self.middle_token)
215
+
216
+ @property
217
+ def suffix_token(self):
218
+ return self._suffix_token
219
+
220
+ @property
221
+ def suffix_id(self):
222
+ if self._suffix_token is None:
223
+ return None
224
+ return self.convert_tokens_to_ids(self.suffix_token)
225
+
226
+ @property
227
+ def eot_token(self):
228
+ return self._eot_token
229
+
230
+ @property
231
+ def eot_id(self):
232
+ if self._eot_token is None:
233
+ return None
234
+ return self.convert_tokens_to_ids(self.eot_token)
235
+
236
+ @property
237
+ def vocab_size(self):
238
+ """Returns vocab size"""
239
+ return self.sp_model.get_piece_size()
240
+
241
+ # Copied from transformers.models.llama.tokenization_llama.LlamaTokenizer.get_vocab
242
+ def get_vocab(self):
243
+ """Returns vocab as a dict"""
244
+ vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
245
+ vocab.update(self.added_tokens_encoder)
246
+ return vocab
247
+
248
+ def tokenize(self, prefix, suffix=None, suffix_first=False, **kwargs) -> List[int]:
249
+ # add a prefix space to `prefix`
250
+ if self.fill_token is not None and self.fill_token in prefix and suffix is None:
251
+ prefix, suffix = prefix.split(self.fill_token)
252
+
253
+ if len(prefix) > 0:
254
+ prefix = SPIECE_UNDERLINE + prefix.replace(SPIECE_UNDERLINE, " ")
255
+
256
+ if suffix is None or len(suffix) < 1:
257
+ tokens = super().tokenize(prefix, **kwargs)
258
+ if len(tokens) > 1 and tokens[0] == SPIECE_UNDERLINE and tokens[1] in self.all_special_tokens:
259
+ tokens = tokens[1:]
260
+ return tokens
261
+
262
+ prefix_tokens = self._tokenize(prefix) # prefix has an extra `SPIECE_UNDERLINE`
263
+
264
+ if None in (self.prefix_id, self.middle_id, self.suffix_id):
265
+ raise ValueError(
266
+ "The input either includes a `prefix` and a `suffix` used for the infilling task,"
267
+ f" or can be split on the {self.fill_token} token, creating a suffix and prefix,"
268
+ " but the model does not support `infilling`."
269
+ )
270
+ suffix_tokens = self._tokenize(suffix) # make sure CodeLlama sp model does not mess up
271
+
272
+ suffix_first = suffix_first if suffix_first is not None else self.suffix_first
273
+ if suffix_first:
274
+ # format as " <PRE> <SUF>{suf} <MID> {pre}"
275
+ return [self.prefix_token, self.suffix_token] + suffix_tokens + [self.middle_token] + prefix_tokens
276
+ else:
277
+ # format as " <PRE> {pre} <SUF>{suf} <MID>"
278
+ return [self.prefix_token] + prefix_tokens + [self.suffix_token] + suffix_tokens + [self.middle_token]
279
+
280
+ def _tokenize(self, text, **kwargs):
281
+ """
282
+ Returns a tokenized string.
283
+
284
+ We de-activated the `add_dummy_prefix` option, thus the sentencepiece internals will always strip any
285
+ SPIECE_UNDERLINE. For example: `self.sp_model.encode(f"{SPIECE_UNDERLINE}Hey", out_type = str)` will give
286
+ `['H', 'e', 'y']` instead of `['▁He', 'y']`. Thus we always encode `f"{unk_token}text"` and strip the
287
+ `unk_token`. Here is an example with `unk_token = "<unk>"` and `unk_token_length = 4`.
288
+ `self.tokenizer.sp_model.encode("<unk> Hey", out_type = str)[4:]`.
289
+ """
290
+ tokens = self.sp_model.encode(text, out_type=str)
291
+ if not text.startswith((SPIECE_UNDERLINE, " ")):
292
+ return tokens
293
+ # 1. Encode string + prefix ex: "<unk> Hey"
294
+ tokens = self.sp_model.encode(self.unk_token + text, out_type=str)
295
+ # 2. Remove self.unk_token from ['<','unk','>', '▁Hey']
296
+ return tokens[self.unk_token_length :] if len(tokens) >= self.unk_token_length else tokens
297
+
298
+ # Copied from transformers.models.llama.tokenization_llama.LlamaTokenizer._convert_token_to_id
299
+ def _convert_token_to_id(self, token):
300
+ """Converts a token (str) in an id using the vocab."""
301
+ return self.sp_model.piece_to_id(token)
302
+
303
+ # Copied from transformers.models.llama.tokenization_llama.LlamaTokenizer._convert_id_to_token
304
+ def _convert_id_to_token(self, index):
305
+ """Converts an index (integer) in a token (str) using the vocab."""
306
+ token = self.sp_model.IdToPiece(index)
307
+ return token
308
+
309
+ def convert_tokens_to_string(self, tokens):
310
+ """Converts a sequence of tokens (strings) into a single string."""
311
+ # since we manually add the prefix space, we have to remove it when decoding
312
+ if tokens[0].startswith(SPIECE_UNDERLINE):
313
+ tokens[0] = tokens[0][1:]
314
+
315
+ current_sub_tokens = []
316
+ out_string = ""
317
+ for _, token in enumerate(tokens):
318
+ # make sure that special tokens are not decoded using sentencepiece model
319
+ if token in self.all_special_tokens:
320
+ out_string += self.sp_model.decode(current_sub_tokens) + token
321
+ current_sub_tokens = []
322
+ else:
323
+ current_sub_tokens.append(token)
324
+ out_string += self.sp_model.decode(current_sub_tokens)
325
+ return out_string
326
+
327
+ # Copied from transformers.models.llama.tokenization_llama.LlamaTokenizer.save_vocabulary
328
+ def save_vocabulary(self, save_directory, filename_prefix: Optional[str] = None) -> Tuple[str]:
329
+ """
330
+ Save the vocabulary and special tokens file to a directory.
331
+
332
+ Args:
333
+ save_directory (`str`):
334
+ The directory in which to save the vocabulary.
335
+
336
+ Returns:
337
+ `Tuple(str)`: Paths to the files saved.
338
+ """
339
+ if not os.path.isdir(save_directory):
340
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
341
+ return
342
+ out_vocab_file = os.path.join(
343
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
344
+ )
345
+
346
+ if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
347
+ copyfile(self.vocab_file, out_vocab_file)
348
+ elif not os.path.isfile(self.vocab_file):
349
+ with open(out_vocab_file, "wb") as fi:
350
+ content_spiece_model = self.sp_model.serialized_model_proto()
351
+ fi.write(content_spiece_model)
352
+
353
+ return (out_vocab_file,)
354
+
355
+ # Copied from transformers.models.llama.tokenization_llama.LlamaTokenizer.build_inputs_with_special_tokens
356
+ def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
357
+ bos_token_id = [self.bos_token_id] if self.add_bos_token else []
358
+ eos_token_id = [self.eos_token_id] if self.add_eos_token else []
359
+
360
+ output = bos_token_id + token_ids_0 + eos_token_id
361
+
362
+ if token_ids_1 is not None:
363
+ output = output + bos_token_id + token_ids_1 + eos_token_id
364
+
365
+ return output
366
+
367
+ # Copied from transformers.models.llama.tokenization_llama.LlamaTokenizer.get_special_tokens_mask
368
+ def get_special_tokens_mask(
369
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
370
+ ) -> List[int]:
371
+ """
372
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
373
+ special tokens using the tokenizer `prepare_for_model` method.
374
+
375
+ Args:
376
+ token_ids_0 (`List[int]`):
377
+ List of IDs.
378
+ token_ids_1 (`List[int]`, *optional*):
379
+ Optional second list of IDs for sequence pairs.
380
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
381
+ Whether or not the token list is already formatted with special tokens for the model.
382
+
383
+ Returns:
384
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
385
+ """
386
+ if already_has_special_tokens:
387
+ return super().get_special_tokens_mask(
388
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
389
+ )
390
+
391
+ bos_token_id = [1] if self.add_bos_token else []
392
+ eos_token_id = [1] if self.add_eos_token else []
393
+
394
+ if token_ids_1 is None:
395
+ return bos_token_id + ([0] * len(token_ids_0)) + eos_token_id
396
+ return (
397
+ bos_token_id
398
+ + ([0] * len(token_ids_0))
399
+ + eos_token_id
400
+ + bos_token_id
401
+ + ([0] * len(token_ids_1))
402
+ + eos_token_id
403
+ )
404
+
405
+ # Copied from transformers.models.llama.tokenization_llama.LlamaTokenizer.create_token_type_ids_from_sequences
406
+ def create_token_type_ids_from_sequences(
407
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
408
+ ) -> List[int]:
409
+ """
410
+ Creates a mask from the two sequences passed to be used in a sequence-pair classification task. A
411
+ sequence pair mask has the following format:
412
+
413
+ ```
414
+ 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
415
+ | first sequence | second sequence |
416
+ ```
417
+
418
+ If `token_ids_1` is `None`, only returns the first portion of the mask (0s).
419
+
420
+ Args:
421
+ token_ids_0 (`List[int]`):
422
+ List of ids.
423
+ token_ids_1 (`List[int]`, *optional*):
424
+ Optional second list of IDs for sequence pairs.
425
+
426
+ Returns:
427
+ `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
428
+ """
429
+ bos_token_id = [self.bos_token_id] if self.add_bos_token else []
430
+ eos_token_id = [self.eos_token_id] if self.add_eos_token else []
431
+
432
+ output = [0] * len(bos_token_id + token_ids_0 + eos_token_id)
433
+
434
+ if token_ids_1 is not None:
435
+ output += [1] * len(bos_token_id + token_ids_1 + eos_token_id)
436
+
437
+ return output
438
+
439
+ @property
440
+ # Copied from transformers.models.llama.tokenization_llama.LlamaTokenizer.default_chat_template
441
+ def default_chat_template(self):
442
+ """
443
+ LLaMA uses [INST] and [/INST] to indicate user messages, and <<SYS>> and <</SYS>> to indicate system messages.
444
+ Assistant messages do not have special tokens, because LLaMA chat models are generally trained with strict
445
+ user/assistant/user/assistant message ordering, and so assistant messages can be identified from the ordering
446
+ rather than needing special tokens. The system message is partly 'embedded' in the first user message, which
447
+ results in an unusual token ordering when it is present. This template should definitely be changed if you wish
448
+ to fine-tune a model with more flexible role ordering!
449
+
450
+ The output should look something like:
451
+
452
+ <bos>[INST] B_SYS SystemPrompt E_SYS Prompt [/INST] Answer <eos><bos>[INST] Prompt [/INST] Answer <eos>
453
+ <bos>[INST] Prompt [/INST]
454
+
455
+ The reference for this chat template is [this code
456
+ snippet](https://github.com/facebookresearch/llama/blob/556949fdfb72da27c2f4a40b7f0e4cf0b8153a28/llama/generation.py#L320-L362)
457
+ in the original repository.
458
+ """
459
+ logger.warning_once(
460
+ "\nNo chat template is defined for this tokenizer - using the default template "
461
+ f"for the {self.__class__.__name__} class. If the default is not appropriate for "
462
+ "your model, please set `tokenizer.chat_template` to an appropriate template. "
463
+ "See https://huggingface.co/docs/transformers/main/chat_templating for more information.\n"
464
+ )
465
+ template = (
466
+ "{% if messages[0]['role'] == 'system' %}"
467
+ "{% set loop_messages = messages[1:] %}" # Extract system message if it's present
468
+ "{% set system_message = messages[0]['content'] %}"
469
+ "{% elif USE_DEFAULT_PROMPT == true and not '<<SYS>>' in messages[0]['content'] %}"
470
+ "{% set loop_messages = messages %}" # Or use the default system message if the flag is set
471
+ "{% set system_message = 'DEFAULT_SYSTEM_MESSAGE' %}"
472
+ "{% else %}"
473
+ "{% set loop_messages = messages %}"
474
+ "{% set system_message = false %}"
475
+ "{% endif %}"
476
+ "{% for message in loop_messages %}" # Loop over all non-system messages
477
+ "{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}"
478
+ "{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}"
479
+ "{% endif %}"
480
+ "{% if loop.index0 == 0 and system_message != false %}" # Embed system message in first message
481
+ "{% set content = '<<SYS>>\\n' + system_message + '\\n<</SYS>>\\n\\n' + message['content'] %}"
482
+ "{% else %}"
483
+ "{% set content = message['content'] %}"
484
+ "{% endif %}"
485
+ "{% if message['role'] == 'user' %}" # After all of that, handle messages/roles in a fairly normal way
486
+ "{{ bos_token + '[INST] ' + content.strip() + ' [/INST]' }}"
487
+ "{% elif message['role'] == 'system' %}"
488
+ "{{ '<<SYS>>\\n' + content.strip() + '\\n<</SYS>>\\n\\n' }}"
489
+ "{% elif message['role'] == 'assistant' %}"
490
+ "{{ ' ' + content.strip() + ' ' + eos_token }}"
491
+ "{% endif %}"
492
+ "{% endfor %}"
493
+ )
494
+ template = template.replace("USE_DEFAULT_PROMPT", "true" if self.use_default_system_prompt else "false")
495
+ default_message = DEFAULT_SYSTEM_PROMPT.replace("\n", "\\n").replace("'", "\\'")
496
+ template = template.replace("DEFAULT_SYSTEM_MESSAGE", default_message)
497
+
498
+ return template
499
+
500
+ def __getstate__(self):
501
+ state = self.__dict__.copy()
502
+ state["sp_model"] = None
503
+ state["sp_model_proto"] = self.sp_model.serialized_model_proto()
504
+ return state
505
+
506
+ def __setstate__(self, d):
507
+ self.__dict__ = d
508
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
509
+ self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
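The infilling path implemented by `tokenize` above is easiest to see end to end. Here is a minimal usage sketch, assuming the `codellama/CodeLlama-7b-hf` checkpoint is reachable and `sentencepiece` is installed; the checkpoint name and the printed pieces are illustrative expectations, not guarantees.

```python
from transformers import CodeLlamaTokenizer

# Assumption: any CodeLlama checkpoint whose tokenizer config defines the
# <PRE>/<MID>/<SUF>/<EOT> sentinels should behave the same way.
tokenizer = CodeLlamaTokenizer.from_pretrained("codellama/CodeLlama-7b-hf")

# A prompt containing <FILL_ME> is split into a prefix and a suffix, and the
# pieces are laid out as " <PRE> {pre} <SUF>{suf} <MID>".
prompt = "def remove_non_ascii(s: str) -> str:\n    <FILL_ME>\n    return result"
tokens = tokenizer.tokenize(prompt)
print(tokens[0], tokens[-1])  # expected: '▁<PRE>' ... '▁<MID>'

# Passing the suffix explicitly takes the same code path as <FILL_ME>.
same = tokenizer.tokenize("def remove_non_ascii(s: str) -> str:\n    ",
                          suffix="\n    return result")
print(tokens == same)  # expected: True
```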
llmeval-env/lib/python3.10/site-packages/transformers/models/code_llama/tokenization_code_llama_fast.py ADDED
@@ -0,0 +1,439 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ import os
16
+ from shutil import copyfile
17
+ from typing import List, Optional, Tuple
18
+
19
+ from tokenizers import normalizers, processors
20
+
21
+ from ...tokenization_utils_fast import PreTrainedTokenizerFast
22
+ from ...utils import is_sentencepiece_available, logging
23
+ from ...utils.versions import require_version
24
+
25
+
26
+ require_version("tokenizers>=0.13.3")
27
+
28
+ if is_sentencepiece_available():
29
+ from .tokenization_code_llama import CodeLlamaTokenizer
30
+ else:
31
+ CodeLlamaTokenizer = None
32
+
33
+ logger = logging.get_logger(__name__)
34
+ VOCAB_FILES_NAMES = {"vocab_file": "tokenizer.model", "tokenizer_file": "tokenizer.json"}
35
+
36
+ SPIECE_UNDERLINE = "▁"
37
+
38
+
39
+ B_INST, E_INST = "[INST]", "[/INST]"
40
+ B_SYS, E_SYS = "<<SYS>>\n", "\n<</SYS>>\n\n"
41
+
42
+ # fmt: off
43
+ DEFAULT_SYSTEM_PROMPT = """You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your \
44
+ answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure \
45
+ that your responses are socially unbiased and positive in nature.
46
+
47
+ If a question does not make any sense, or is not factually coherent, explain why instead of answering something not \
48
+ correct. If you don't know the answer to a question, please don't share false information."""
49
+ # fmt: on
50
+
51
+
52
+ class CodeLlamaTokenizerFast(PreTrainedTokenizerFast):
53
+ """
54
+ Construct a CodeLlama tokenizer. Based on byte-level Byte-Pair-Encoding.
55
+
56
+ Notably, this uses ByteFallback and no normalization.
57
+
58
+ ```python
59
+ >>> from transformers import CodeLlamaTokenizerFast
60
+
61
+ >>> tokenizer = CodeLlamaTokenizerFast.from_pretrained("hf-internal-testing/llama-tokenizer")
62
+ >>> tokenizer.encode("Hello this is a test")
63
+ [1, 15043, 445, 338, 263, 1243]
64
+ ```
65
+
66
+ If you want to change the `bos_token` or the `eos_token`, make sure to specify them when initializing the model, or
67
+ call `tokenizer.update_post_processor()` to make sure that the post-processing is correctly done (otherwise the
68
+ values of the first token and final token of an encoded sequence will not be correct). For more details, check out the
69
+ [post-processors](https://huggingface.co/docs/tokenizers/api/post-processors) documentation.
70
+
71
+
72
+ This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
73
+ refer to this superclass for more information regarding those methods. The default configuration matches that of
74
+ [codellama/CodeLlama-7b-Instruct-hf](https://huggingface.co/codellama/CodeLlama-7b-Instruct-hf/blob/main/tokenizer_config.json)
75
+ which supports prompt infilling.
76
+
77
+ Args:
78
+ vocab_file (`str`, *optional*):
79
+ [SentencePiece](https://github.com/google/sentencepiece) file (generally has a .model extension) that
80
+ contains the vocabulary necessary to instantiate a tokenizer.
81
+ tokenizer_file (`str`, *optional*):
82
+ [tokenizers](https://github.com/huggingface/tokenizers) file (generally has a .json extension) that
83
+ contains everything needed to load the tokenizer.
84
+ clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`):
85
+ Whether to clean up spaces after decoding; cleanup consists of removing potential artifacts like extra
86
+ spaces.
87
+ unk_token (`str`, *optional*, defaults to `"<unk>"`):
88
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
89
+ token instead.
90
+ bos_token (`str`, *optional*, defaults to `"<s>"`):
91
+ The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
92
+ eos_token (`str`, *optional*, defaults to `"</s>"`):
93
+ The end of sequence token.
94
+ prefix_token (`str`, *optional*, defaults to `"▁<PRE>"`):
95
+ Prefix token used for infilling.
96
+ middle_token (`str`, *optional*, defaults to `"▁<MID>"`):
97
+ Middle token used for infilling.
98
+ suffix_token (`str`, *optional*, defaults to `"▁<SUF>"`):
99
+ Suffix token used for infilling.
100
+ eot_token (`str`, *optional*, defaults to `"▁<EOT>"`):
101
+ End of text token used for infilling.
102
+ fill_token (`str`, *optional*, defaults to `"<FILL_ME>"`):
103
+ The token used to split the input between the prefix and suffix.
104
+ additional_special_tokens (`List[str]`, *optional*):
105
+ Additional special tokens used by the tokenizer.
106
+ add_bos_token (`bool`, *optional*, defaults to `True`):
107
+ Whether to add a beginning of sequence token at the start of sequences.
108
+ add_eos_token (`bool`, *optional*, defaults to `False`):
109
+ Whether to add an end of sequence token at the end of sequences.
110
+ use_default_system_prompt (`bool`, *optional*, defaults to `False`):
111
+ Whether or not the default system prompt for Llama should be used.
112
+ """
113
+
114
+ vocab_files_names = VOCAB_FILES_NAMES
115
+ slow_tokenizer_class = CodeLlamaTokenizer
116
+ padding_side = "left"
117
+ model_input_names = ["input_ids", "attention_mask"]
118
+
119
+ def __init__(
120
+ self,
121
+ vocab_file=None,
122
+ tokenizer_file=None,
123
+ clean_up_tokenization_spaces=False,
124
+ unk_token="<unk>",
125
+ bos_token="<s>",
126
+ eos_token="</s>",
127
+ prefix_token="▁<PRE>",
128
+ middle_token="▁<MID>",
129
+ suffix_token="▁<SUF>",
130
+ eot_token="▁<EOT>",
131
+ fill_token="<FILL_ME>",
132
+ additional_special_tokens=None,
133
+ add_bos_token=True,
134
+ add_eos_token=False,
135
+ use_default_system_prompt=False,
136
+ **kwargs,
137
+ ):
138
+ # mark tokens special to skip them
139
+ additional_special_tokens = additional_special_tokens or []
140
+ for token in [prefix_token, middle_token, suffix_token, eot_token]:
141
+ additional_special_tokens += [token] if token is not None else []
142
+ self.use_default_system_prompt = use_default_system_prompt
143
+
144
+ super().__init__(
145
+ vocab_file=vocab_file,
146
+ tokenizer_file=tokenizer_file,
147
+ clean_up_tokenization_spaces=clean_up_tokenization_spaces,
148
+ additional_special_tokens=additional_special_tokens,
149
+ unk_token=unk_token,
150
+ bos_token=bos_token,
151
+ eos_token=eos_token,
152
+ add_bos_token=add_bos_token,
153
+ add_eos_token=add_eos_token,
154
+ prefix_token=prefix_token,
155
+ middle_token=middle_token,
156
+ suffix_token=suffix_token,
157
+ eot_token=eot_token,
158
+ fill_token=fill_token,
159
+ use_default_system_prompt=use_default_system_prompt,
160
+ **kwargs,
161
+ )
162
+ self._add_bos_token = add_bos_token
163
+ self._add_eos_token = add_eos_token
164
+ self.update_post_processor()
165
+
166
+ self.vocab_file = vocab_file
167
+
168
+ self._prefix_token = prefix_token
169
+ self._middle_token = middle_token
170
+ self._suffix_token = suffix_token
171
+ self._eot_token = eot_token
172
+ self.fill_token = fill_token
173
+
174
+ @property
175
+ def can_save_slow_tokenizer(self) -> bool:
176
+ return os.path.isfile(self.vocab_file) if self.vocab_file else False
177
+
178
+ # Copied from transformers.models.llama.tokenization_llama_fast.LlamaTokenizerFast.update_post_processor
179
+ def update_post_processor(self):
180
+ """
181
+ Updates the underlying post processor with the current `bos_token` and `eos_token`.
182
+ """
183
+ bos = self.bos_token
184
+ bos_token_id = self.bos_token_id
185
+ if bos is None and self.add_bos_token:
186
+ raise ValueError("add_bos_token = True but bos_token = None")
187
+
188
+ eos = self.eos_token
189
+ eos_token_id = self.eos_token_id
190
+ if eos is None and self.add_eos_token:
191
+ raise ValueError("add_eos_token = True but eos_token = None")
192
+
193
+ single = f"{(bos+':0 ') if self.add_bos_token else ''}$A:0{(' '+eos+':0') if self.add_eos_token else ''}"
194
+ pair = f"{single}{(' '+bos+':1') if self.add_bos_token else ''} $B:1{(' '+eos+':1') if self.add_eos_token else ''}"
195
+
196
+ special_tokens = []
197
+ if self.add_bos_token:
198
+ special_tokens.append((bos, bos_token_id))
199
+ if self.add_eos_token:
200
+ special_tokens.append((eos, eos_token_id))
201
+ self._tokenizer.post_processor = processors.TemplateProcessing(
202
+ single=single, pair=pair, special_tokens=special_tokens
203
+ )
204
+
205
+ @property
206
+ def prefix_token(self):
207
+ return self._prefix_token
208
+
209
+ @property
210
+ def prefix_id(self):
211
+ if self._prefix_token is None:
212
+ return None
213
+ return self.convert_tokens_to_ids(self.prefix_token)
214
+
215
+ @property
216
+ def middle_token(self):
217
+ return self._middle_token
218
+
219
+ @property
220
+ def middle_id(self):
221
+ if self._middle_token is None:
222
+ return None
223
+ return self.convert_tokens_to_ids(self.middle_token)
224
+
225
+ @property
226
+ def suffix_token(self):
227
+ return self._suffix_token
228
+
229
+ @property
230
+ def suffix_id(self):
231
+ if self._suffix_token is None:
232
+ return None
233
+ return self.convert_tokens_to_ids(self.suffix_token)
234
+
235
+ @property
236
+ def eot_id(self):
237
+ if self._eot_token is None:
238
+ return None
239
+ return self.convert_tokens_to_ids(self.eot_token)
240
+
241
+ @property
242
+ def eot_token(self):
243
+ return self._eot_token
244
+
245
+ @property
246
+ def add_eos_token(self):
247
+ return self._add_eos_token
248
+
249
+ @property
250
+ def add_bos_token(self):
251
+ return self._add_bos_token
252
+
253
+ @add_eos_token.setter
254
+ def add_eos_token(self, value):
255
+ self._add_eos_token = value
256
+ self.update_post_processor()
257
+
258
+ @add_bos_token.setter
259
+ def add_bos_token(self, value):
260
+ self._add_bos_token = value
261
+ self.update_post_processor()
262
+
263
+ def set_infilling_processor(self, reset, suffix_first=False, add_special_tokens=True):
264
+ """
265
+ Updates the normalizer to make sure the prompt format for `infilling` is respected. The infilling format is the
266
+ following: if suffix_first
267
+ " <PRE> <SUF>{suf} <MID> {pre}"
268
+ else:
269
+ " <PRE> {pre} <SUF>{suf} <MID>"
270
+
271
+ If `reset` is set to `True`, the `normalizer` and `post_processor` are reset to their "normal" behaviour, which
272
+ is to add a prefix space for the normalizer, and add a `bos_token` to the input text for the `post_processor`.
273
+ """
274
+ if reset:
275
+ self._tokenizer.normalizer = normalizers.Sequence(
276
+ [
277
+ normalizers.Prepend(prepend="▁"),
278
+ normalizers.Replace(pattern=" ", content="▁"),
279
+ ]
280
+ )
281
+ self.update_post_processor()
282
+ return
283
+
284
+ self._tokenizer.normalizer = normalizers.Replace(pattern=" ", content="▁")
285
+ pair = [self.bos_token] if self.add_bos_token and add_special_tokens else []
286
+ special_tokens = [(self.bos_token, self.bos_token_id)] if self.add_bos_token and add_special_tokens else []
287
+ if suffix_first:
288
+ # format as " <PRE> <SUF>{suf} <MID> {pre}"
289
+ pair += [self.prefix_token, self.suffix_token, "$B", self.middle_token, "$A"]
290
+ special_tokens += [
291
+ (self.prefix_token, self.prefix_id),
292
+ (self.suffix_token, self.suffix_id),
293
+ (self.middle_token, self.middle_id),
294
+ ]
295
+ else:
296
+ # format as " <PRE> {pre} <SUF>{suf} <MID>"
297
+ pair += [self.prefix_token, "$A", self.suffix_token, "$B", self.middle_token]
298
+ special_tokens += [
299
+ (self.prefix_token, self.prefix_id),
300
+ (self.suffix_token, self.suffix_id),
301
+ (self.middle_token, self.middle_id),
302
+ ]
303
+
304
+ if self.add_eos_token and add_special_tokens:
305
+ pair += [self.eos_token]
306
+ special_tokens += [(self.eos_token, self.eos_token_id)]
307
+ self._tokenizer.post_processor = processors.TemplateProcessing(
308
+ single="$A", pair=pair, special_tokens=special_tokens
309
+ )
310
+
311
+ def encode_plus(self, text, text_pair=None, suffix_first=False, add_special_tokens=True, **kwargs):
312
+ # hack to make sure the input is pre-processed, but outside of the Rust tokenizer
313
+ text_pair = kwargs.pop("suffix", text_pair)
314
+ if self.fill_token is not None and self.fill_token in text and text_pair is None:
315
+ text, text_pair = text.split(self.fill_token)
316
+
317
+ if text_pair is None or len(text_pair) < 1:
318
+ return super().encode_plus(text, text_pair, add_special_tokens=add_special_tokens, **kwargs)
319
+
320
+ if None in (self.prefix_id, self.middle_id, self.suffix_id):
321
+ raise ValueError(
322
+ "When the input includes a `prefix` and a `suffix` used for the infilling task,"
323
+ " the `prefix_id, middle_id, suffix_id` must all be initialized. Current"
324
+ f" values : {self.prefix_id, self.middle_id, self.suffix_id}"
325
+ )
326
+
327
+ self.set_infilling_processor(False, suffix_first=suffix_first, add_special_tokens=add_special_tokens)
328
+ tokens = super().encode_plus(" " + text, text_pair=text_pair, add_special_tokens=True, **kwargs)
329
+ self.set_infilling_processor(True)
330
+ return tokens
331
+
332
+ # Copied from transformers.models.llama.tokenization_llama_fast.LlamaTokenizerFast.save_vocabulary
333
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
334
+ if not self.can_save_slow_tokenizer:
335
+ raise ValueError(
336
+ "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
337
+ "tokenizer."
338
+ )
339
+
340
+ if not os.path.isdir(save_directory):
341
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
342
+ return
343
+ out_vocab_file = os.path.join(
344
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
345
+ )
346
+
347
+ if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
348
+ copyfile(self.vocab_file, out_vocab_file)
349
+
350
+ return (out_vocab_file,)
351
+
352
+ @property
353
+ # Copied from transformers.models.llama.tokenization_llama.LlamaTokenizer.default_chat_template
354
+ def default_chat_template(self):
355
+ """
356
+ LLaMA uses [INST] and [/INST] to indicate user messages, and <<SYS>> and <</SYS>> to indicate system messages.
357
+ Assistant messages do not have special tokens, because LLaMA chat models are generally trained with strict
358
+ user/assistant/user/assistant message ordering, and so assistant messages can be identified from the ordering
359
+ rather than needing special tokens. The system message is partly 'embedded' in the first user message, which
360
+ results in an unusual token ordering when it is present. This template should definitely be changed if you wish
361
+ to fine-tune a model with more flexible role ordering!
362
+
363
+ The output should look something like:
364
+
365
+ <bos>[INST] B_SYS SystemPrompt E_SYS Prompt [/INST] Answer <eos><bos>[INST] Prompt [/INST] Answer <eos>
366
+ <bos>[INST] Prompt [/INST]
367
+
368
+ The reference for this chat template is [this code
369
+ snippet](https://github.com/facebookresearch/llama/blob/556949fdfb72da27c2f4a40b7f0e4cf0b8153a28/llama/generation.py#L320-L362)
370
+ in the original repository.
371
+ """
372
+ logger.warning_once(
373
+ "\nNo chat template is defined for this tokenizer - using the default template "
374
+ f"for the {self.__class__.__name__} class. If the default is not appropriate for "
375
+ "your model, please set `tokenizer.chat_template` to an appropriate template. "
376
+ "See https://huggingface.co/docs/transformers/main/chat_templating for more information.\n"
377
+ )
378
+ template = (
379
+ "{% if messages[0]['role'] == 'system' %}"
380
+ "{% set loop_messages = messages[1:] %}" # Extract system message if it's present
381
+ "{% set system_message = messages[0]['content'] %}"
382
+ "{% elif USE_DEFAULT_PROMPT == true and not '<<SYS>>' in messages[0]['content'] %}"
383
+ "{% set loop_messages = messages %}" # Or use the default system message if the flag is set
384
+ "{% set system_message = 'DEFAULT_SYSTEM_MESSAGE' %}"
385
+ "{% else %}"
386
+ "{% set loop_messages = messages %}"
387
+ "{% set system_message = false %}"
388
+ "{% endif %}"
389
+ "{% for message in loop_messages %}" # Loop over all non-system messages
390
+ "{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}"
391
+ "{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}"
392
+ "{% endif %}"
393
+ "{% if loop.index0 == 0 and system_message != false %}" # Embed system message in first message
394
+ "{% set content = '<<SYS>>\\n' + system_message + '\\n<</SYS>>\\n\\n' + message['content'] %}"
395
+ "{% else %}"
396
+ "{% set content = message['content'] %}"
397
+ "{% endif %}"
398
+ "{% if message['role'] == 'user' %}" # After all of that, handle messages/roles in a fairly normal way
399
+ "{{ bos_token + '[INST] ' + content.strip() + ' [/INST]' }}"
400
+ "{% elif message['role'] == 'system' %}"
401
+ "{{ '<<SYS>>\\n' + content.strip() + '\\n<</SYS>>\\n\\n' }}"
402
+ "{% elif message['role'] == 'assistant' %}"
403
+ "{{ ' ' + content.strip() + ' ' + eos_token }}"
404
+ "{% endif %}"
405
+ "{% endfor %}"
406
+ )
407
+ template = template.replace("USE_DEFAULT_PROMPT", "true" if self.use_default_system_prompt else "false")
408
+ default_message = DEFAULT_SYSTEM_PROMPT.replace("\n", "\\n").replace("'", "\\'")
409
+ template = template.replace("DEFAULT_SYSTEM_MESSAGE", default_message)
410
+
411
+ return template
412
+
413
+ def build_inputs_with_special_tokens(
414
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
415
+ ) -> List[int]:
416
+ """
417
+ Build model inputs from a sequence or a pair of sequences by concatenating and adding special tokens. A
+ CodeLlama sequence has the following format, where `X` represents the sequence:
+
+ - single sequence: `<bos> X <eos>`
+ - pair of sequences: `<bos> token_ids_0 token_ids_1 <eos>`
+
+ Pairs of sequences are not the expected use case, but they will be handled without a separator.
427
+
428
+ Args:
429
+ token_ids_0 (`List[int]`):
430
+ List of IDs to which the special tokens will be added.
431
+ token_ids_1 (`List[int]`, *optional*):
432
+ Optional second list of IDs for sequence pairs.
433
+
434
+ Returns:
435
+ `List[int]`: list of [input IDs](../glossary#input-ids) with the appropriate special tokens.
436
+ """
437
+ if token_ids_1 is None:
438
+ return [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
439
+ return [self.bos_token_id] + token_ids_0 + token_ids_1 + [self.eos_token_id]
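For the fast tokenizer, infilling goes through `encode_plus` and `set_infilling_processor` rather than `tokenize`. Below is a short sketch of both the `<FILL_ME>` form and the explicit `suffix=` form, plus the default chat template; the checkpoint name is illustrative and the printed equality is the expected outcome under these assumptions, not a guarantee.

```python
from transformers import CodeLlamaTokenizerFast

# Assumption: an Instruct checkpoint that ships a tokenizer.json with the infilling tokens.
tokenizer = CodeLlamaTokenizerFast.from_pretrained("codellama/CodeLlama-7b-Instruct-hf")

# Either embed <FILL_ME> in one string or pass the suffix explicitly; both routes
# split the text and temporarily install the infilling TemplateProcessing.
ids_fill = tokenizer("def add(a, b):<FILL_ME>return a + b").input_ids
ids_pair = tokenizer("def add(a, b):", suffix="return a + b").input_ids
print(ids_fill == ids_pair)  # expected: True

# suffix_first=True swaps the layout to " <PRE> <SUF>{suf} <MID> {pre}".
ids_sf = tokenizer("def add(a, b):", suffix="return a + b", suffix_first=True).input_ids

# The default chat template wraps user turns in [INST] ... [/INST].
chat = [{"role": "user", "content": "Write a function that reverses a string."}]
prompt = tokenizer.apply_chat_template(chat, tokenize=False)
```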
llmeval-env/lib/python3.10/site-packages/transformers/models/codegen/__init__.py ADDED
@@ -0,0 +1,73 @@
1
+ # Copyright 2022 Salesforce authors, The EleutherAI, and HuggingFace Teams. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
17
+
18
+
19
+ _import_structure = {
20
+ "configuration_codegen": ["CODEGEN_PRETRAINED_CONFIG_ARCHIVE_MAP", "CodeGenConfig", "CodeGenOnnxConfig"],
21
+ "tokenization_codegen": ["CodeGenTokenizer"],
22
+ }
23
+
24
+ try:
25
+ if not is_tokenizers_available():
26
+ raise OptionalDependencyNotAvailable()
27
+ except OptionalDependencyNotAvailable:
28
+ pass
29
+ else:
30
+ _import_structure["tokenization_codegen_fast"] = ["CodeGenTokenizerFast"]
31
+
32
+ try:
33
+ if not is_torch_available():
34
+ raise OptionalDependencyNotAvailable()
35
+ except OptionalDependencyNotAvailable:
36
+ pass
37
+ else:
38
+ _import_structure["modeling_codegen"] = [
39
+ "CODEGEN_PRETRAINED_MODEL_ARCHIVE_LIST",
40
+ "CodeGenForCausalLM",
41
+ "CodeGenModel",
42
+ "CodeGenPreTrainedModel",
43
+ ]
44
+
45
+ if TYPE_CHECKING:
46
+ from .configuration_codegen import CODEGEN_PRETRAINED_CONFIG_ARCHIVE_MAP, CodeGenConfig, CodeGenOnnxConfig
47
+ from .tokenization_codegen import CodeGenTokenizer
48
+
49
+ try:
50
+ if not is_tokenizers_available():
51
+ raise OptionalDependencyNotAvailable()
52
+ except OptionalDependencyNotAvailable:
53
+ pass
54
+ else:
55
+ from .tokenization_codegen_fast import CodeGenTokenizerFast
56
+
57
+ try:
58
+ if not is_torch_available():
59
+ raise OptionalDependencyNotAvailable()
60
+ except OptionalDependencyNotAvailable:
61
+ pass
62
+ else:
63
+ from .modeling_codegen import (
64
+ CODEGEN_PRETRAINED_MODEL_ARCHIVE_LIST,
65
+ CodeGenForCausalLM,
66
+ CodeGenModel,
67
+ CodeGenPreTrainedModel,
68
+ )
69
+
70
+ else:
71
+ import sys
72
+
73
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
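The `_import_structure` / `_LazyModule` arrangement above keeps importing the package cheap: optional backends are only checked when a name is actually resolved. A small runtime sketch of that behaviour, assuming a standard `transformers` installation:

```python
import importlib

# Importing the subpackage yields the lazy proxy registered in sys.modules above.
codegen = importlib.import_module("transformers.models.codegen")
print(type(codegen).__name__)  # typically "_LazyModule"

# Attribute access triggers the real import of the matching submodule.
CodeGenConfig = codegen.CodeGenConfig
print(CodeGenConfig.model_type)  # "codegen"

# Torch-backed classes are only registered when torch is available; without it
# the attribute is simply absent from the lazy module.
CodeGenForCausalLM = getattr(codegen, "CodeGenForCausalLM", None)
print(CodeGenForCausalLM is not None)
```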
llmeval-env/lib/python3.10/site-packages/transformers/models/codegen/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.25 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/codegen/__pycache__/configuration_codegen.cpython-310.pyc ADDED
Binary file (8.37 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/codegen/__pycache__/modeling_codegen.cpython-310.pyc ADDED
Binary file (20.6 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/codegen/__pycache__/tokenization_codegen.cpython-310.pyc ADDED
Binary file (15.7 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/codegen/__pycache__/tokenization_codegen_fast.cpython-310.pyc ADDED
Binary file (10.1 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/codegen/configuration_codegen.py ADDED
@@ -0,0 +1,229 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 Salesforce authors, The EleutherAI, and HuggingFace Teams. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ CodeGen model configuration"""
16
+ from collections import OrderedDict
17
+ from typing import Any, List, Mapping, Optional
18
+
19
+ from ... import PreTrainedTokenizer, TensorType, is_torch_available
20
+ from ...configuration_utils import PretrainedConfig
21
+ from ...onnx import OnnxConfigWithPast, PatchingSpec
22
+ from ...utils import logging
23
+
24
+
25
+ logger = logging.get_logger(__name__)
26
+
27
+
28
+ from ..deprecated._archive_maps import CODEGEN_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
29
+
30
+
31
+ class CodeGenConfig(PretrainedConfig):
32
+ r"""
33
+ This is the configuration class to store the configuration of a [`CodeGenModel`]. It is used to instantiate a
34
+ CodeGen model according to the specified arguments, defining the model architecture. Instantiating a configuration
35
+ with the defaults will yield a similar configuration to that of the CodeGen
36
+ [Salesforce/codegen-2B-mono](https://huggingface.co/Salesforce/codegen-2B-mono) architecture. Configuration objects
37
+ inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from
38
+ [`PretrainedConfig`] for more information.
39
+
40
+ Args:
41
+ vocab_size (`int`, *optional*, defaults to 50400):
42
+ Vocabulary size of the CodeGen model. Defines the number of different tokens that can be represented by the
43
+ `inputs_ids` passed when calling [`CodeGenModel`].
44
+ n_positions (`int`, *optional*, defaults to 2048):
45
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
46
+ just in case (e.g., 512 or 1024 or 2048).
47
+ n_ctx (`int`, *optional*, defaults to 2048):
48
+ This attribute is used in `CodeGenModel.__init__` without any real effect.
49
+ n_embd (`int`, *optional*, defaults to 4096):
50
+ Dimensionality of the embeddings and hidden states.
51
+ n_layer (`int`, *optional*, defaults to 28):
52
+ Number of hidden layers in the Transformer encoder.
53
+ n_head (`int`, *optional*, defaults to 16):
54
+ Number of attention heads for each attention layer in the Transformer encoder.
55
+ rotary_dim (`int`, *optional*, defaults to 64):
56
+ Number of dimensions in the embedding that Rotary Position Embedding is applied to.
57
+ n_inner (`int`, *optional*):
58
+ Dimensionality of the inner feed-forward layers. `None` will set it to 4 times n_embd
59
+ activation_function (`str`, *optional*, defaults to `"gelu_new"`):
60
+ Activation function, to be selected in the list `["relu", "silu", "gelu", "tanh", "gelu_new"]`.
61
+ resid_pdrop (`float`, *optional*, defaults to 0.0):
62
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
63
+ embd_pdrop (`int`, *optional*, defaults to 0.0):
64
+ The dropout ratio for the embeddings.
65
+ attn_pdrop (`float`, *optional*, defaults to 0.0):
66
+ The dropout ratio for the attention.
67
+ layer_norm_epsilon (`float`, *optional*, defaults to 1e-05):
68
+ The epsilon to use in the layer normalization layers.
69
+ initializer_range (`float`, *optional*, defaults to 0.02):
70
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
71
+ use_cache (`bool`, *optional*, defaults to `True`):
72
+ Whether or not the model should return the last key/values attentions (not used by all models).
73
+ bos_token_id (`int`, *optional*, defaults to 50256):
74
+ Beginning of stream token id.
75
+ eos_token_id (`int`, *optional*, defaults to 50256):
76
+ End of stream token id.
77
+ tie_word_embeddings (`bool`, *optional*, defaults to `False`):
78
+ Whether the model's input and output word embeddings should be tied. Note that this is only relevant if the
79
+ model has a output word embedding layer.
80
+
81
+ Example:
82
+
83
+ ```python
84
+ >>> from transformers import CodeGenConfig, CodeGenModel
85
+
86
+ >>> # Initializing a CodeGen 6B configuration
87
+ >>> configuration = CodeGenConfig()
88
+
89
+ >>> # Initializing a model (with random weights) from the configuration
90
+ >>> model = CodeGenModel(configuration)
91
+
92
+ >>> # Accessing the model configuration
93
+ >>> configuration = model.config
94
+ ```"""
95
+
96
+ model_type = "codegen"
97
+ attribute_map = {
98
+ "max_position_embeddings": "n_positions",
99
+ "hidden_size": "n_embd",
100
+ "num_attention_heads": "n_head",
101
+ "num_hidden_layers": "n_layer",
102
+ }
103
+
104
+ def __init__(
105
+ self,
106
+ vocab_size=50400,
107
+ n_positions=2048,
108
+ n_ctx=2048,
109
+ n_embd=4096,
110
+ n_layer=28,
111
+ n_head=16,
112
+ rotary_dim=64,
113
+ n_inner=None,
114
+ activation_function="gelu_new",
115
+ resid_pdrop=0.0,
116
+ embd_pdrop=0.0,
117
+ attn_pdrop=0.0,
118
+ layer_norm_epsilon=1e-5,
119
+ initializer_range=0.02,
120
+ use_cache=True,
121
+ bos_token_id=50256,
122
+ eos_token_id=50256,
123
+ tie_word_embeddings=False,
124
+ **kwargs,
125
+ ):
126
+ self.vocab_size = vocab_size
127
+ self.n_ctx = n_ctx
128
+ self.n_positions = n_positions
129
+ self.n_embd = n_embd
130
+ self.n_layer = n_layer
131
+ self.n_head = n_head
132
+ self.n_inner = n_inner
133
+ self.rotary_dim = rotary_dim
134
+ self.activation_function = activation_function
135
+ self.resid_pdrop = resid_pdrop
136
+ self.embd_pdrop = embd_pdrop
137
+ self.attn_pdrop = attn_pdrop
138
+ self.layer_norm_epsilon = layer_norm_epsilon
139
+ self.initializer_range = initializer_range
140
+ self.use_cache = use_cache
141
+
142
+ self.bos_token_id = bos_token_id
143
+ self.eos_token_id = eos_token_id
144
+
145
+ super().__init__(
146
+ bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
147
+ )
148
+
149
+
150
+ # Copied from transformers.models.gpt2.configuration_gpt2.GPT2OnnxConfig
151
+ class CodeGenOnnxConfig(OnnxConfigWithPast):
152
+ def __init__(
153
+ self,
154
+ config: PretrainedConfig,
155
+ task: str = "default",
156
+ patching_specs: List[PatchingSpec] = None,
157
+ use_past: bool = False,
158
+ ):
159
+ super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
160
+ if not getattr(self._config, "pad_token_id", None):
161
+ # TODO: how to do that better?
162
+ self._config.pad_token_id = 0
163
+
164
+ @property
165
+ def inputs(self) -> Mapping[str, Mapping[int, str]]:
166
+ common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
167
+ if self.use_past:
168
+ self.fill_with_past_key_values_(common_inputs, direction="inputs")
169
+ common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
170
+ else:
171
+ common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
172
+
173
+ return common_inputs
174
+
175
+ @property
176
+ def num_layers(self) -> int:
177
+ return self._config.n_layer
178
+
179
+ @property
180
+ def num_attention_heads(self) -> int:
181
+ return self._config.n_head
182
+
183
+ def generate_dummy_inputs(
184
+ self,
185
+ tokenizer: PreTrainedTokenizer,
186
+ batch_size: int = -1,
187
+ seq_length: int = -1,
188
+ is_pair: bool = False,
189
+ framework: Optional[TensorType] = None,
190
+ ) -> Mapping[str, Any]:
191
+ common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
192
+ tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
193
+ )
194
+
195
+ # We need to order the inputs in the way they appear in the forward()
196
+ ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})
197
+
198
+ # Need to add the past_keys
199
+ if self.use_past:
200
+ if not is_torch_available():
201
+ raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
202
+ else:
203
+ import torch
204
+
205
+ batch, seqlen = common_inputs["input_ids"].shape
206
+ # Not using the same length for past_key_values
207
+ past_key_values_length = seqlen + 2
208
+ past_shape = (
209
+ batch,
210
+ self.num_attention_heads,
211
+ past_key_values_length,
212
+ self._config.hidden_size // self.num_attention_heads,
213
+ )
214
+ ordered_inputs["past_key_values"] = [
215
+ (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
216
+ ]
217
+
218
+ ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
219
+ if self.use_past:
220
+ mask_dtype = ordered_inputs["attention_mask"].dtype
221
+ ordered_inputs["attention_mask"] = torch.cat(
222
+ [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
223
+ )
224
+
225
+ return ordered_inputs
226
+
227
+ @property
228
+ def default_onnx_opset(self) -> int:
229
+ return 13
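Because of `attribute_map`, the CodeGen-specific hyperparameter names (`n_embd`, `n_layer`, `n_head`, `n_positions`) and the generic names used across the library (`hidden_size`, `num_hidden_layers`, ...) address the same values, which is also how `CodeGenOnnxConfig.generate_dummy_inputs` reads `hidden_size`. A short sketch with arbitrary, illustration-only sizes:

```python
from transformers import CodeGenConfig

# Arbitrary, smaller-than-default sizes purely for illustration.
config = CodeGenConfig(n_embd=1024, n_layer=20, n_head=16, rotary_dim=32)

# attribute_map aliases the generic names onto the CodeGen-specific ones.
print(config.hidden_size)              # 1024 (alias of n_embd)
print(config.num_hidden_layers)        # 20   (alias of n_layer)
print(config.num_attention_heads)      # 16   (alias of n_head)
print(config.max_position_embeddings)  # 2048 (alias of n_positions, default)
```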
llmeval-env/lib/python3.10/site-packages/transformers/models/codegen/modeling_codegen.py ADDED
@@ -0,0 +1,719 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 Salesforce authors, The EleutherAI, and HuggingFace Teams. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch CodeGen model."""
16
+
17
+ from typing import Optional, Tuple, Union
18
+
19
+ import torch
20
+ import torch.utils.checkpoint
21
+ from torch import nn
22
+ from torch.nn import CrossEntropyLoss
23
+
24
+ from ...activations import ACT2FN
25
+ from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
26
+ from ...modeling_utils import PreTrainedModel
27
+ from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
28
+ from .configuration_codegen import CodeGenConfig
29
+
30
+
31
+ logger = logging.get_logger(__name__)
32
+
33
+ _CHECKPOINT_FOR_DOC = "Salesforce/codegen-2B-mono"
34
+ _CONFIG_FOR_DOC = "CodeGenConfig"
35
+
36
+
37
+ from ..deprecated._archive_maps import CODEGEN_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
38
+
39
+
40
+ # Copied from transformers.models.gptj.modeling_gptj.create_sinusoidal_positions
41
+ def create_sinusoidal_positions(num_pos: int, dim: int) -> torch.Tensor:
42
+ inv_freq = 1.0 / (10000 ** (torch.arange(0, dim, 2, dtype=torch.int64) / dim))
43
+ sinusoid_inp = torch.einsum("i , j -> i j", torch.arange(num_pos, dtype=torch.int64).float(), inv_freq).float()
44
+ return torch.cat((torch.sin(sinusoid_inp), torch.cos(sinusoid_inp)), dim=1)
45
+
46
+
47
+ # Copied from transformers.models.gptj.modeling_gptj.rotate_every_two
48
+ def rotate_every_two(x: torch.Tensor) -> torch.Tensor:
49
+ x1 = x[:, :, :, ::2]
50
+ x2 = x[:, :, :, 1::2]
51
+ x = torch.stack((-x2, x1), dim=-1)
52
+ return x.flatten(-2) # in einsum notation: rearrange(x, '... d j -> ... (d j)')
53
+
54
+
55
+ # Copied from transformers.models.gptj.modeling_gptj.apply_rotary_pos_emb
56
+ def apply_rotary_pos_emb(tensor: torch.Tensor, sin: torch.Tensor, cos: torch.Tensor) -> torch.Tensor:
57
+ sin = torch.repeat_interleave(sin[:, :, None, :], 2, 3)
58
+ cos = torch.repeat_interleave(cos[:, :, None, :], 2, 3)
59
+ return (tensor * cos) + (rotate_every_two(tensor) * sin)
60
+
61
+
62
+ class CodeGenAttention(nn.Module):
63
+ def __init__(self, config):
64
+ super().__init__()
65
+
66
+ max_positions = config.max_position_embeddings
67
+ self.register_buffer(
68
+ "causal_mask",
69
+ torch.tril(torch.ones((max_positions, max_positions), dtype=torch.bool)).view(
70
+ 1, 1, max_positions, max_positions
71
+ ),
72
+ persistent=False,
73
+ )
74
+
75
+ self.attn_dropout = nn.Dropout(config.attn_pdrop)
76
+ self.resid_dropout = nn.Dropout(config.resid_pdrop)
77
+
78
+ self.embed_dim = config.hidden_size
79
+ self.num_attention_heads = config.num_attention_heads
80
+ self.head_dim = self.embed_dim // self.num_attention_heads
81
+ if self.head_dim * self.num_attention_heads != self.embed_dim:
82
+ raise ValueError(
83
+ f"embed_dim must be divisible by num_attention_heads (got `embed_dim`: {self.embed_dim} and"
84
+ f" `num_attention_heads`: {self.num_attention_heads})."
85
+ )
86
+ self.scale_attn = torch.sqrt(torch.tensor(self.head_dim, dtype=torch.float32)).to(torch.get_default_dtype())
87
+ self.qkv_proj = nn.Linear(self.embed_dim, self.embed_dim * 3, bias=False)
88
+
89
+ self.out_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=False)
90
+ self.rotary_dim = config.rotary_dim
91
+ pos_embd_dim = self.rotary_dim or self.embed_dim
92
+ self.embed_positions = create_sinusoidal_positions(max_positions, pos_embd_dim)
93
+
94
+ def _split_heads(self, x, n_head, dim_head, mp_num):
95
+ reshaped = x.reshape(x.shape[:-1] + (n_head // mp_num, dim_head))
96
+ reshaped = reshaped.reshape(x.shape[:-2] + (-1,) + reshaped.shape[-1:])
97
+ return reshaped
98
+
99
+ def _merge_heads(self, tensor, num_attention_heads, attn_head_size):
100
+ """
101
+ Merges attn_head_size dim and num_attn_heads dim into the hidden dim
102
+ """
103
+ if len(tensor.shape) == 5:
104
+ tensor = tensor.permute(0, 1, 3, 2, 4).contiguous()
105
+ elif len(tensor.shape) == 4:
106
+ tensor = tensor.permute(0, 2, 1, 3).contiguous()
107
+ else:
108
+ raise ValueError(f"Input tensor rank should be one of [4, 5], but is: {len(tensor.shape)}")
109
+ new_shape = tensor.size()[:-2] + (num_attention_heads * attn_head_size,)
110
+ return tensor.view(new_shape)
111
+
112
+ def _attn(
113
+ self,
114
+ query,
115
+ key,
116
+ value,
117
+ attention_mask=None,
118
+ head_mask=None,
119
+ ):
120
+ # compute causal mask from causal mask buffer
121
+ query_length, key_length = query.size(-2), key.size(-2)
122
+ causal_mask = self.causal_mask[:, :, key_length - query_length : key_length, :key_length]
123
+
124
+ # Keep the attention weights computation in fp32 to avoid overflow issues
125
+ query = query.to(torch.float32)
126
+ key = key.to(torch.float32)
127
+
128
+ attn_weights = torch.matmul(query, key.transpose(-1, -2))
129
+
130
+ attn_weights = attn_weights / self.scale_attn
131
+ mask_value = torch.finfo(attn_weights.dtype).min
132
+ # Need to be a tensor, otherwise we get error: `RuntimeError: expected scalar type float but found double`.
133
+ # Need to be on the same device, otherwise `RuntimeError: ..., x and y to be on the same device`
134
+ mask_value = torch.tensor(mask_value, dtype=attn_weights.dtype).to(attn_weights.device)
135
+ attn_weights = torch.where(causal_mask, attn_weights, mask_value)
136
+
137
+ if attention_mask is not None:
138
+ # Apply the attention mask
139
+ attn_weights = attn_weights + attention_mask
140
+
141
+ attn_weights = nn.Softmax(dim=-1)(attn_weights)
142
+ attn_weights = attn_weights.to(value.dtype)
143
+ attn_weights = self.attn_dropout(attn_weights)
144
+
145
+ # Mask heads if we want to
146
+ if head_mask is not None:
147
+ attn_weights = attn_weights * head_mask
148
+
149
+ attn_output = torch.matmul(attn_weights, value)
150
+
151
+ return attn_output, attn_weights
152
+
153
+ def forward(
154
+ self,
155
+ hidden_states: Optional[torch.FloatTensor],
156
+ layer_past: Optional[Tuple[torch.Tensor]] = None,
157
+ attention_mask: Optional[torch.FloatTensor] = None,
158
+ position_ids: Optional[torch.LongTensor] = None,
159
+ head_mask: Optional[torch.FloatTensor] = None,
160
+ use_cache: Optional[bool] = False,
161
+ output_attentions: Optional[bool] = False,
162
+ ) -> Union[
163
+ Tuple[torch.Tensor, Tuple[torch.Tensor]],
164
+ Optional[Tuple[torch.Tensor, Tuple[torch.Tensor], Tuple[torch.Tensor, ...]]],
165
+ ]:
166
+ qkv = self.qkv_proj(hidden_states)
167
+ # TODO(enijkamp): factor out number of logical TPU-v4 cores or make forward pass agnostic
168
+ mp_num = 4
169
+ qkv_split = qkv.reshape(qkv.shape[:-1] + (mp_num, -1))
170
+
171
+ local_dim = self.head_dim * self.num_attention_heads // mp_num
172
+ query, value, key = torch.split(qkv_split, local_dim, dim=-1)
173
+ query = self._split_heads(query, self.num_attention_heads, self.head_dim, mp_num=mp_num)
174
+ key = self._split_heads(key, self.num_attention_heads, self.head_dim, mp_num=mp_num)
175
+
176
+ value = self._split_heads(value, self.num_attention_heads, self.head_dim, mp_num=mp_num)
177
+ value = value.permute(0, 2, 1, 3)
178
+
179
+ embed_positions = self.embed_positions
180
+ if embed_positions.device != position_ids.device:
181
+ embed_positions = embed_positions.to(position_ids.device)
182
+ self.embed_positions = embed_positions
183
+
184
+ sincos = embed_positions[position_ids]
185
+ sin, cos = torch.split(sincos, sincos.shape[-1] // 2, dim=-1)
186
+
187
+ if self.rotary_dim is not None:
188
+ k_rot = key[:, :, :, : self.rotary_dim]
189
+ k_pass = key[:, :, :, self.rotary_dim :]
190
+
191
+ q_rot = query[:, :, :, : self.rotary_dim]
192
+ q_pass = query[:, :, :, self.rotary_dim :]
193
+
194
+ k_rot = apply_rotary_pos_emb(k_rot, sin, cos)
195
+ q_rot = apply_rotary_pos_emb(q_rot, sin, cos)
196
+
197
+ key = torch.cat([k_rot, k_pass], dim=-1)
198
+ query = torch.cat([q_rot, q_pass], dim=-1)
199
+ else:
200
+ key = apply_rotary_pos_emb(key, sin, cos)
201
+ query = apply_rotary_pos_emb(query, sin, cos)
202
+
203
+ key = key.permute(0, 2, 1, 3)
204
+ query = query.permute(0, 2, 1, 3)
205
+
206
+ if layer_past is not None:
207
+ past_key = layer_past[0]
208
+ past_value = layer_past[1]
209
+ key = torch.cat((past_key, key), dim=-2)
210
+ value = torch.cat((past_value, value), dim=-2)
211
+
212
+ if use_cache is True:
213
+ # Note that this cast is quite ugly, but is not implemented before ROPE as k_rot in the original codebase is always in fp32.
214
+ # Reference: https://github.com/salesforce/CodeGen/blob/f210c3bb1216c975ad858cd4132c0fdeabf4bfc2/codegen1/jaxformer/hf/codegen/modeling_codegen.py#L38
215
+ present = (key.to(hidden_states.dtype), value)
216
+ else:
217
+ present = None
218
+
219
+ # compute self-attention: V x Softmax(QK^T)
220
+ attn_output, attn_weights = self._attn(query, key, value, attention_mask, head_mask)
221
+
222
+ attn_output = self._merge_heads(attn_output, self.num_attention_heads, self.head_dim)
223
+ attn_output = self.out_proj(attn_output)
224
+ attn_output = self.resid_dropout(attn_output)
225
+
226
+ outputs = (attn_output, present)
227
+ if output_attentions:
228
+ outputs += (attn_weights,)
229
+
230
+ return outputs # a, present, (attentions)
231
+
232
+
233
+ # Copied from transformers.models.gptj.modeling_gptj.GPTJMLP with GPTJ->CodeGen
234
+ class CodeGenMLP(nn.Module):
235
+ def __init__(self, intermediate_size, config): # in MLP: intermediate_size= 4 * embed_dim
236
+ super().__init__()
237
+ embed_dim = config.n_embd
238
+
239
+ self.fc_in = nn.Linear(embed_dim, intermediate_size)
240
+ self.fc_out = nn.Linear(intermediate_size, embed_dim)
241
+
242
+ self.act = ACT2FN[config.activation_function]
243
+ self.dropout = nn.Dropout(config.resid_pdrop)
244
+
245
+ def forward(self, hidden_states: Optional[torch.FloatTensor]) -> torch.FloatTensor:
246
+ hidden_states = self.fc_in(hidden_states)
247
+ hidden_states = self.act(hidden_states)
248
+ hidden_states = self.fc_out(hidden_states)
249
+ hidden_states = self.dropout(hidden_states)
250
+ return hidden_states
251
+
252
+
253
+ # Copied from transformers.models.gptj.modeling_gptj.GPTJBlock with GPTJ->CodeGen
254
+ class CodeGenBlock(nn.Module):
255
+ # Ignore copy
256
+ def __init__(self, config):
257
+ super().__init__()
258
+ inner_dim = config.n_inner if config.n_inner is not None else 4 * config.n_embd
259
+ self.ln_1 = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)
260
+ self.attn = CodeGenAttention(config)
261
+ self.mlp = CodeGenMLP(inner_dim, config)
262
+
263
+ def forward(
264
+ self,
265
+ hidden_states: Optional[torch.FloatTensor],
266
+ layer_past: Optional[Tuple[torch.Tensor]] = None,
267
+ attention_mask: Optional[torch.FloatTensor] = None,
268
+ position_ids: Optional[torch.LongTensor] = None,
269
+ head_mask: Optional[torch.FloatTensor] = None,
270
+ use_cache: Optional[bool] = False,
271
+ output_attentions: Optional[bool] = False,
272
+ ) -> Union[Tuple[torch.Tensor], Optional[Tuple[torch.Tensor, Tuple[torch.FloatTensor, ...]]]]:
273
+ residual = hidden_states
274
+ hidden_states = self.ln_1(hidden_states)
275
+ attn_outputs = self.attn(
276
+ hidden_states=hidden_states,
277
+ layer_past=layer_past,
278
+ attention_mask=attention_mask,
279
+ position_ids=position_ids,
280
+ head_mask=head_mask,
281
+ use_cache=use_cache,
282
+ output_attentions=output_attentions,
283
+ )
284
+ attn_output = attn_outputs[0] # output_attn: a, present, (attentions)
285
+ outputs = attn_outputs[1:]
286
+
287
+ feed_forward_hidden_states = self.mlp(hidden_states)
288
+ hidden_states = attn_output + feed_forward_hidden_states + residual
289
+
290
+ if use_cache:
291
+ outputs = (hidden_states,) + outputs
292
+ else:
293
+ outputs = (hidden_states,) + outputs[1:]
294
+
295
+ return outputs # hidden_states, present, (attentions)
296
+
297
+
298
+ class CodeGenPreTrainedModel(PreTrainedModel):
299
+ """
300
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
301
+ models.
302
+ """
303
+
304
+ config_class = CodeGenConfig
305
+ base_model_prefix = "transformer"
306
+ supports_gradient_checkpointing = True
307
+ _no_split_modules = ["CodeGenBlock"]
308
+ _skip_keys_device_placement = "past_key_values"
309
+
310
+ def __init__(self, *inputs, **kwargs):
311
+ super().__init__(*inputs, **kwargs)
312
+
313
+ def _init_weights(self, module):
314
+ """Initialize the weights."""
315
+ if isinstance(module, (nn.Linear,)):
316
+ # Slightly different from Mesh Transformer JAX which uses truncated_normal for initialization
317
+ # cf https://github.com/pytorch/pytorch/pull/5617
318
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
319
+ if module.bias is not None:
320
+ module.bias.data.zero_()
321
+ elif isinstance(module, nn.Embedding):
322
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
323
+ if module.padding_idx is not None:
324
+ module.weight.data[module.padding_idx].zero_()
325
+ elif isinstance(module, nn.LayerNorm):
326
+ module.bias.data.zero_()
327
+ module.weight.data.fill_(1.0)
328
+
329
+
330
+ CODEGEN_START_DOCSTRING = r"""
331
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
332
+ it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
333
+ behavior.
334
+
335
+ Parameters:
336
+ config ([`CodeGenConfig`]): Model configuration class with all the parameters of the model.
337
+ Initializing with a config file does not load the weights associated with the model, only the
338
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
339
+ """
340
+
341
+ CODEGEN_INPUTS_DOCSTRING = r"""
342
+ Args:
343
+ input_ids (`torch.LongTensor` of shape `({0})`):
344
+ Indices of input sequence tokens in the vocabulary.
345
+
346
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
347
+ [`PreTrainedTokenizer.__call__`] for details.
348
+
349
+ [What are input IDs?](../glossary#input-ids)
350
+ attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
351
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
352
+
353
+ - 1 for tokens that are **not masked**,
354
+ - 0 for tokens that are **masked**.
355
+
356
+ [What are attention masks?](../glossary#attention-mask)
357
+ token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
358
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
359
+ 1]`:
360
+
361
+ - 0 corresponds to a *sentence A* token,
362
+ - 1 corresponds to a *sentence B* token.
363
+
364
+ [What are token type IDs?](../glossary#token-type-ids)
365
+ position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
366
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
367
+ config.n_positions - 1]`.
368
+
369
+ [What are position IDs?](../glossary#position-ids)
370
+ head_mask (`torch.FloatTensor` of shape `(num_attention_heads,)` or `(n_layer, num_attention_heads)`, *optional*):
371
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
372
+
373
+ - 1 indicates the head is **not masked**,
374
+ - 0 indicates the head is **masked**.
375
+
376
+ inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_dim)`, *optional*):
377
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
378
+ is useful if you want more control over how to convert *input_ids* indices into associated vectors than the
379
+ model's internal embedding lookup matrix.
380
+ output_attentions (`bool`, *optional*):
381
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
382
+ tensors for more detail.
383
+ output_hidden_states (`bool`, *optional*):
384
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
385
+ more detail.
386
+ return_dict (`bool`, *optional*):
387
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
388
+ """
389
+
390
+
391
+ @add_start_docstrings(
392
+ "The bare CodeGen Model transformer outputting raw hidden-states without any specific head on top.",
393
+ CODEGEN_START_DOCSTRING,
394
+ )
395
+ class CodeGenModel(CodeGenPreTrainedModel):
396
+ def __init__(self, config):
397
+ super().__init__(config)
398
+
399
+ self.embed_dim = config.n_embd
400
+ self.vocab_size = config.vocab_size
401
+ self.wte = nn.Embedding(config.vocab_size, self.embed_dim)
402
+ self.drop = nn.Dropout(config.embd_pdrop)
403
+ self.h = nn.ModuleList([CodeGenBlock(config) for _ in range(config.n_layer)])
404
+ self.ln_f = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon)
405
+ self.rotary_dim = min(config.rotary_dim, config.n_ctx // config.num_attention_heads)
406
+
407
+ self.gradient_checkpointing = False
408
+
409
+ # Initialize weights and apply final processing
410
+ self.post_init()
411
+
412
+ def get_input_embeddings(self):
413
+ return self.wte
414
+
415
+ def set_input_embeddings(self, new_embeddings):
416
+ self.wte = new_embeddings
417
+
418
+ @add_start_docstrings_to_model_forward(CODEGEN_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
419
+ @add_code_sample_docstrings(
420
+ checkpoint=_CHECKPOINT_FOR_DOC,
421
+ output_type=BaseModelOutputWithPast,
422
+ config_class=_CONFIG_FOR_DOC,
423
+ )
424
+ def forward(
425
+ self,
426
+ input_ids: Optional[torch.LongTensor] = None,
427
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
428
+ attention_mask: Optional[torch.FloatTensor] = None,
429
+ token_type_ids: Optional[torch.LongTensor] = None,
430
+ position_ids: Optional[torch.LongTensor] = None,
431
+ head_mask: Optional[torch.FloatTensor] = None,
432
+ inputs_embeds: Optional[torch.FloatTensor] = None,
433
+ use_cache: Optional[bool] = None,
434
+ output_attentions: Optional[bool] = None,
435
+ output_hidden_states: Optional[bool] = None,
436
+ return_dict: Optional[bool] = None,
437
+ ) -> Union[Tuple, BaseModelOutputWithPast]:
438
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
439
+ output_hidden_states = (
440
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
441
+ )
442
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
443
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
444
+
445
+ if input_ids is not None and inputs_embeds is not None:
446
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
447
+ elif input_ids is not None:
448
+ self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
449
+ input_shape = input_ids.size()
450
+ input_ids = input_ids.view(-1, input_shape[-1])
451
+ batch_size = input_ids.shape[0]
452
+ elif inputs_embeds is not None:
453
+ input_shape = inputs_embeds.size()[:-1]
454
+ batch_size = inputs_embeds.shape[0]
455
+ else:
456
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
457
+
458
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
459
+
460
+ if token_type_ids is not None:
461
+ token_type_ids = token_type_ids.view(-1, input_shape[-1])
462
+
463
+ if past_key_values is None:
464
+ past_length = 0
465
+ past_key_values = tuple([None] * len(self.h))
466
+ else:
467
+ past_length = past_key_values[0][0].size(-2)
468
+
469
+ if position_ids is None:
470
+ position_ids = torch.arange(past_length, input_shape[-1] + past_length, dtype=torch.long, device=device)
471
+ position_ids = position_ids.unsqueeze(0)
472
+
473
+ # Attention mask.
474
+ if attention_mask is not None:
475
+ if batch_size <= 0:
476
+ raise ValueError("batch_size has to be defined and > 0")
477
+ attention_mask = attention_mask.view(batch_size, -1)
478
+ # We create a 3D attention mask from a 2D tensor mask.
479
+ # Sizes are [batch_size, 1, 1, to_seq_length]
480
+ # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
481
+ # this attention mask is more simple than the triangular masking of causal attention
482
+ # used in OpenAI GPT, we just need to prepare the broadcast dimension here.
483
+ attention_mask = attention_mask[:, None, None, :]
484
+
485
+ # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
486
+ # masked positions, this operation will create a tensor which is 0.0 for
487
+ # positions we want to attend and the dtype's smallest value for masked positions.
488
+ # Since we are adding it to the raw scores before the softmax, this is
489
+ # effectively the same as removing these entirely.
490
+ attention_mask = attention_mask.to(dtype=self.dtype) # fp16 compatibility
491
+ attention_mask = (1.0 - attention_mask) * torch.finfo(self.dtype).min
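+ # Illustrative note (not part of the original file): for an fp32 model, a mask row [1, 1, 0]
+ # becomes [0.0, 0.0, ~-3.4e38] here, so masked positions contribute essentially nothing after
+ # the softmax while attended positions are left unchanged.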
492
+
493
+ # Prepare head mask if needed
494
+ # 1.0 in head_mask indicate we keep the head
495
+ # attention_probs has shape bsz x num_attention_heads x N x N
496
+ # head_mask has shape n_layer x batch x num_attention_heads x N x N
497
+ head_mask = self.get_head_mask(head_mask, self.config.n_layer)
498
+
499
+ if inputs_embeds is None:
500
+ inputs_embeds = self.wte(input_ids)
501
+
502
+ hidden_states = inputs_embeds
503
+
504
+ if token_type_ids is not None:
505
+ token_type_embeds = self.wte(token_type_ids)
506
+ hidden_states = hidden_states + token_type_embeds
507
+
508
+ hidden_states = self.drop(hidden_states)
509
+
510
+ output_shape = input_shape + (hidden_states.size(-1),)
511
+
512
+ if self.gradient_checkpointing and self.training:
513
+ if use_cache:
514
+ logger.warning_once(
515
+ "`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting "
516
+ "`use_cache=False`..."
517
+ )
518
+ use_cache = False
519
+
520
+ presents = () if use_cache else None
521
+ all_self_attentions = () if output_attentions else None
522
+ all_hidden_states = () if output_hidden_states else None
523
+ for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)):
524
+ if output_hidden_states:
525
+ all_hidden_states = all_hidden_states + (hidden_states,)
526
+
527
+ if self.gradient_checkpointing and self.training:
528
+ outputs = self._gradient_checkpointing_func(
529
+ block.__call__,
530
+ hidden_states,
531
+ None,
532
+ attention_mask,
533
+ position_ids,
534
+ head_mask[i],
535
+ use_cache,
536
+ output_attentions,
537
+ )
538
+ else:
539
+ outputs = block(
540
+ hidden_states=hidden_states,
541
+ layer_past=layer_past,
542
+ attention_mask=attention_mask,
543
+ position_ids=position_ids,
544
+ head_mask=head_mask[i],
545
+ use_cache=use_cache,
546
+ output_attentions=output_attentions,
547
+ )
548
+
549
+ hidden_states = outputs[0]
550
+ if use_cache is True:
551
+ presents = presents + (outputs[1],)
552
+
553
+ if output_attentions:
554
+ all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],)
555
+
556
+ hidden_states = self.ln_f(hidden_states)
557
+
558
+ hidden_states = hidden_states.view(output_shape)
559
+ # Add last hidden state
560
+ if output_hidden_states:
561
+ all_hidden_states = all_hidden_states + (hidden_states,)
562
+
563
+ if not return_dict:
564
+ return tuple(v for v in [hidden_states, presents, all_hidden_states, all_self_attentions] if v is not None)
565
+
566
+ return BaseModelOutputWithPast(
567
+ last_hidden_state=hidden_states,
568
+ past_key_values=presents,
569
+ hidden_states=all_hidden_states,
570
+ attentions=all_self_attentions,
571
+ )
572
+
573
+
574
+ @add_start_docstrings(
575
+ """
576
+ The CodeGen Model transformer with a language modeling head on top.
577
+ """,
578
+ CODEGEN_START_DOCSTRING,
579
+ )
580
+ class CodeGenForCausalLM(CodeGenPreTrainedModel):
581
+ _tied_weights_keys = ["lm_head.weight"]
582
+
583
+ def __init__(self, config):
584
+ super().__init__(config)
585
+ self.transformer = CodeGenModel(config)
586
+ self.lm_head = nn.Linear(config.n_embd, config.vocab_size)
587
+
588
+ # Initialize weights and apply final processing
589
+ self.post_init()
590
+
591
+ def get_output_embeddings(self):
592
+ return self.lm_head
593
+
594
+ def set_output_embeddings(self, new_embeddings):
595
+ self.lm_head = new_embeddings
596
+
597
+ def prepare_inputs_for_generation(self, input_ids, past_key_values=None, **kwargs):
598
+ token_type_ids = kwargs.get("token_type_ids", None)
599
+ # Omit tokens covered by past_key_values
600
+ if past_key_values:
601
+ past_length = past_key_values[0][0].shape[2]
602
+
603
+ # Some generation methods already pass only the last input ID
604
+ if input_ids.shape[1] > past_length:
605
+ remove_prefix_length = past_length
606
+ else:
607
+ # Default to old behavior: keep only final ID
608
+ remove_prefix_length = input_ids.shape[1] - 1
609
+
610
+ input_ids = input_ids[:, remove_prefix_length:]
611
+ if token_type_ids is not None:
612
+ token_type_ids = token_type_ids[:, -input_ids.shape[1] :]
613
+
614
+ attention_mask = kwargs.get("attention_mask", None)
615
+ position_ids = kwargs.get("position_ids", None)
616
+
617
+ if attention_mask is not None and position_ids is None:
618
+ # create position_ids on the fly for batch generation
619
+ position_ids = attention_mask.long().cumsum(-1) - 1
620
+ position_ids.masked_fill_(attention_mask == 0, 1)
621
+ if past_key_values:
622
+ position_ids = position_ids[:, -input_ids.shape[1] :]
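+ # Illustrative note (not part of the original file): for a left-padded mask [0, 1, 1, 1],
+ # cumsum(-1) - 1 gives [-1, 0, 1, 2] and masked_fill_ rewrites the padded slot to 1, so the
+ # real tokens get positions 0, 1, 2; the padded slot is never attended to, so its value is arbitrary.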
623
+
624
+ return {
625
+ "input_ids": input_ids,
626
+ "past_key_values": past_key_values,
627
+ "use_cache": kwargs.get("use_cache"),
628
+ "position_ids": position_ids,
629
+ "attention_mask": attention_mask,
630
+ "token_type_ids": token_type_ids,
631
+ }
632
+
633
+ @add_start_docstrings_to_model_forward(CODEGEN_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
634
+ @add_code_sample_docstrings(
635
+ checkpoint=_CHECKPOINT_FOR_DOC,
636
+ output_type=CausalLMOutputWithPast,
637
+ config_class=_CONFIG_FOR_DOC,
638
+ )
639
+ def forward(
640
+ self,
641
+ input_ids: Optional[torch.LongTensor] = None,
642
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
643
+ attention_mask: Optional[torch.FloatTensor] = None,
644
+ token_type_ids: Optional[torch.LongTensor] = None,
645
+ position_ids: Optional[torch.LongTensor] = None,
646
+ head_mask: Optional[torch.FloatTensor] = None,
647
+ inputs_embeds: Optional[torch.FloatTensor] = None,
648
+ labels: Optional[torch.LongTensor] = None,
649
+ use_cache: Optional[bool] = None,
650
+ output_attentions: Optional[bool] = None,
651
+ output_hidden_states: Optional[bool] = None,
652
+ return_dict: Optional[bool] = None,
653
+ ) -> Union[Tuple, CausalLMOutputWithPast]:
654
+ r"""
655
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
656
+ Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
657
+ `labels = input_ids`. Indices are selected in `[-100, 0, ..., config.vocab_size]`. All labels set to `-100`
658
+ are ignored (masked); the loss is only computed for labels in `[0, ..., config.vocab_size]`.
659
+ """
660
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
661
+
662
+ transformer_outputs = self.transformer(
663
+ input_ids,
664
+ past_key_values=past_key_values,
665
+ attention_mask=attention_mask,
666
+ token_type_ids=token_type_ids,
667
+ position_ids=position_ids,
668
+ head_mask=head_mask,
669
+ inputs_embeds=inputs_embeds,
670
+ use_cache=use_cache,
671
+ output_attentions=output_attentions,
672
+ output_hidden_states=output_hidden_states,
673
+ return_dict=return_dict,
674
+ )
675
+ hidden_states = transformer_outputs[0]
676
+
677
+ # make sure sampling in fp16 works correctly and
678
+ # compute loss in fp32 to match with mesh-tf version
679
+ # https://github.com/EleutherAI/gpt-neo/blob/89ce74164da2fb16179106f54e2269b5da8db333/models/gpt2/gpt2.py#L179
680
+ lm_logits = self.lm_head(hidden_states).to(torch.float32)
681
+
682
+ loss = None
683
+ if labels is not None:
684
+ # move labels to correct device to enable model parallelism
685
+ labels = labels.to(lm_logits.device)
686
+ # Shift so that tokens < n predict n
687
+ shift_logits = lm_logits[..., :-1, :].contiguous()
688
+ shift_labels = labels[..., 1:].contiguous()
689
+ # Flatten the tokens
690
+ loss_fct = CrossEntropyLoss()
691
+ loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
692
+
693
+ loss = loss.to(hidden_states.dtype)
694
+
695
+ if not return_dict:
696
+ output = (lm_logits,) + transformer_outputs[1:]
697
+ return ((loss,) + output) if loss is not None else output
698
+
699
+ return CausalLMOutputWithPast(
700
+ loss=loss,
701
+ logits=lm_logits,
702
+ past_key_values=transformer_outputs.past_key_values,
703
+ hidden_states=transformer_outputs.hidden_states,
704
+ attentions=transformer_outputs.attentions,
705
+ )
706
+
707
+ @staticmethod
708
+ def _reorder_cache(
709
+ past_key_values: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor
710
+ ) -> Tuple[Tuple[torch.Tensor]]:
711
+ """
712
+ This function is used to re-order the `past_key_values` cache if [`~PreTrainedModel.beam_search`] or
713
+ [`~PreTrainedModel.beam_sample`] is called. This is required to match `past_key_values` with the correct
714
+ beam_idx at every generation step.
715
+ """
716
+ return tuple(
717
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past)
718
+ for layer_past in past_key_values
719
+ )
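For orientation, a minimal usage sketch of the `CodeGenForCausalLM` class defined above; the checkpoint name, prompt, and `max_new_tokens` value are illustrative assumptions rather than anything mandated by this file.

```python
# Minimal sketch (assumed checkpoint): greedy generation with the CodeGen causal LM defined above.
from transformers import AutoTokenizer, CodeGenForCausalLM

tokenizer = AutoTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
model = CodeGenForCausalLM.from_pretrained("Salesforce/codegen-350M-mono")

inputs = tokenizer("def hello_world():", return_tensors="pt")
# use_cache=True (the default) reuses past_key_values between steps;
# prepare_inputs_for_generation() above then feeds only the new token ids.
output_ids = model.generate(**inputs, max_new_tokens=32)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
```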
llmeval-env/lib/python3.10/site-packages/transformers/models/codegen/tokenization_codegen.py ADDED
@@ -0,0 +1,417 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The Salesforce authors, The Open AI Team Authors and The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Tokenization classes for CodeGen"""
16
+
17
+
18
+ import json
19
+ import os
20
+ from functools import lru_cache
21
+ from typing import TYPE_CHECKING, List, Optional, Tuple, Union
22
+
23
+ import numpy as np
24
+ import regex as re
25
+
26
+ from ...utils import is_tf_available, is_torch_available, logging, to_py_obj
27
+
28
+
29
+ if TYPE_CHECKING:
30
+ if is_torch_available():
31
+ import torch
32
+ if is_tf_available():
33
+ import tensorflow as tf
34
+
35
+ from ...tokenization_utils import AddedToken, PreTrainedTokenizer
36
+
37
+
38
+ logger = logging.get_logger(__name__)
39
+
40
+ VOCAB_FILES_NAMES = {
41
+ "vocab_file": "vocab.json",
42
+ "merges_file": "merges.txt",
43
+ }
44
+
45
+
46
+ @lru_cache()
47
+ def bytes_to_unicode():
48
+ """
49
+ Returns a list of utf-8 bytes and a mapping to unicode strings. We specifically avoid mapping to whitespace/control
50
+ characters that the bpe code barfs on.
51
+
52
+ The reversible bpe codes work on unicode strings. This means you need a large # of unicode characters in your vocab
53
+ if you want to avoid UNKs. When you're at something like a 10B token dataset you end up needing around 5K for
54
+ decent coverage. This is a significant percentage of your normal, say, 32K bpe vocab. To avoid that, we want lookup
55
+ tables between utf-8 bytes and unicode strings.
56
+ """
57
+ bs = (
58
+ list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
59
+ )
60
+ cs = bs[:]
61
+ n = 0
62
+ for b in range(2**8):
63
+ if b not in bs:
64
+ bs.append(b)
65
+ cs.append(2**8 + n)
66
+ n += 1
67
+ cs = [chr(n) for n in cs]
68
+ return dict(zip(bs, cs))
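+ # Illustrative note (not part of the original file): printable bytes map to themselves and the
+ # remaining bytes are shifted past 255, e.g. bytes_to_unicode()[ord("A")] == "A" while the space
+ # byte maps to "Ġ" (chr(288)), which is why byte-level BPE tokens that begin with a space are
+ # displayed with a leading "Ġ".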
69
+
70
+
71
+ def get_pairs(word):
72
+ """
73
+ Return set of symbol pairs in a word.
74
+
75
+ Word is represented as tuple of symbols (symbols being variable-length strings).
76
+ """
77
+ pairs = set()
78
+ prev_char = word[0]
79
+ for char in word[1:]:
80
+ pairs.add((prev_char, char))
81
+ prev_char = char
82
+ return pairs
83
+
84
+
85
+ class CodeGenTokenizer(PreTrainedTokenizer):
86
+ """
87
+ Construct a CodeGen tokenizer. Based on byte-level Byte-Pair-Encoding.
88
+
89
+ This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will
90
+ be encoded differently depending on whether it is at the beginning of the sentence (without space) or not:
91
+
92
+ ```python
93
+ >>> from transformers import CodeGenTokenizer
94
+
95
+ >>> tokenizer = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
96
+ >>> tokenizer("Hello world")["input_ids"]
97
+ [15496, 995]
98
+
99
+ >>> tokenizer(" Hello world")["input_ids"]
100
+ [18435, 995]
101
+ ```
102
+
103
+ You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer or when you
104
+ call it on some text, but since the model was not pretrained this way, it might yield a decrease in performance.
105
+
106
+ <Tip>
107
+
108
+ When used with `is_split_into_words=True`, this tokenizer will add a space before each word (even the first one).
109
+
110
+ </Tip>
111
+
112
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
113
+ this superclass for more information regarding those methods.
114
+
115
+ Args:
116
+ vocab_file (`str`):
117
+ Path to the vocabulary file.
118
+ merges_file (`str`):
119
+ Path to the merges file.
120
+ errors (`str`, *optional*, defaults to `"replace"`):
121
+ Paradigm to follow when decoding bytes to UTF-8. See
122
+ [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information.
123
+ unk_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
124
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
125
+ token instead.
126
+ bos_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
127
+ The beginning of sequence token.
128
+ eos_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
129
+ The end of sequence token.
130
+ pad_token (`str`, *optional*):
131
+ The token used for padding, for example when batching sequences of different lengths.
132
+ add_prefix_space (`bool`, *optional*, defaults to `False`):
133
+ Whether or not to add an initial space to the input. This allows treating the leading word just like any
134
+ other word. (The CodeGen tokenizer detects the beginning of words by the preceding space.)
135
+ add_bos_token (`bool`, *optional*, defaults to `False`):
136
+ Whether to add a beginning of sequence token at the start of sequences.
137
+ return_token_type_ids (`bool`, *optional*, defaults to `False`):
138
+ Whether to return token type IDs.
139
+ """
140
+
141
+ vocab_files_names = VOCAB_FILES_NAMES
142
+ model_input_names = ["input_ids", "attention_mask"]
143
+
144
+ def __init__(
145
+ self,
146
+ vocab_file,
147
+ merges_file,
148
+ errors="replace",
149
+ unk_token="<|endoftext|>",
150
+ bos_token="<|endoftext|>",
151
+ eos_token="<|endoftext|>",
152
+ pad_token=None,
153
+ add_prefix_space=False,
154
+ add_bos_token=False,
155
+ return_token_type_ids=False,
156
+ **kwargs,
157
+ ):
158
+ bos_token = AddedToken(bos_token, special=True) if isinstance(bos_token, str) else bos_token
159
+ eos_token = AddedToken(eos_token, special=True) if isinstance(eos_token, str) else eos_token
160
+ unk_token = AddedToken(unk_token, special=True) if isinstance(unk_token, str) else unk_token
161
+ pad_token = AddedToken(pad_token, special=True) if isinstance(pad_token, str) else pad_token
162
+ self.add_bos_token = add_bos_token
163
+ self.return_token_type_ids = return_token_type_ids
164
+ if self.return_token_type_ids:
165
+ self.model_input_names.append("token_type_ids")
166
+
167
+ with open(vocab_file, encoding="utf-8") as vocab_handle:
168
+ self.encoder = json.load(vocab_handle)
169
+ self.decoder = {v: k for k, v in self.encoder.items()}
170
+ self.errors = errors # how to handle errors in decoding
171
+ self.byte_encoder = bytes_to_unicode()
172
+ self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
173
+ with open(merges_file, encoding="utf-8") as merges_handle:
174
+ bpe_merges = merges_handle.read().split("\n")[1:-1]
175
+ bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
176
+ self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
177
+ self.cache = {}
178
+ self.add_prefix_space = add_prefix_space
179
+
180
+ # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
181
+ self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")
182
+ super().__init__(
183
+ errors=errors,
184
+ unk_token=unk_token,
185
+ bos_token=bos_token,
186
+ eos_token=eos_token,
187
+ pad_token=pad_token,
188
+ add_prefix_space=add_prefix_space,
189
+ add_bos_token=add_bos_token,
190
+ return_token_type_ids=return_token_type_ids,
191
+ **kwargs,
192
+ )
193
+
194
+ @property
195
+ def vocab_size(self):
196
+ return len(self.encoder)
197
+
198
+ def get_vocab(self):
199
+ return dict(self.encoder, **self.added_tokens_encoder)
200
+
201
+ def bpe(self, token):
202
+ if token in self.cache:
203
+ return self.cache[token]
204
+ word = tuple(token)
205
+ pairs = get_pairs(word)
206
+
207
+ if not pairs:
208
+ return token
209
+
210
+ while True:
211
+ bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
212
+ if bigram not in self.bpe_ranks:
213
+ break
214
+ first, second = bigram
215
+ new_word = []
216
+ i = 0
217
+ while i < len(word):
218
+ try:
219
+ j = word.index(first, i)
220
+ except ValueError:
221
+ new_word.extend(word[i:])
222
+ break
223
+ else:
224
+ new_word.extend(word[i:j])
225
+ i = j
226
+
227
+ if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
228
+ new_word.append(first + second)
229
+ i += 2
230
+ else:
231
+ new_word.append(word[i])
232
+ i += 1
233
+ new_word = tuple(new_word)
234
+ word = new_word
235
+ if len(word) == 1:
236
+ break
237
+ else:
238
+ pairs = get_pairs(word)
239
+ word = " ".join(word)
240
+ self.cache[token] = word
241
+ return word
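+ # Illustrative walkthrough (hypothetical ranks, not taken from a real merges.txt): bpe("hello")
+ # starts from ('h', 'e', 'l', 'l', 'o'); if ('l', 'l') is the lowest-ranked pair it is merged
+ # first, giving ('h', 'e', 'll', 'o'), and merging continues until no remaining pair appears in
+ # self.bpe_ranks; the surviving symbols are joined with spaces (e.g. "h e ll o") and cached.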
242
+
243
+ def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
244
+ if self.add_bos_token:
245
+ bos_token_ids = [self.bos_token_id]
246
+ else:
247
+ bos_token_ids = []
248
+
249
+ output = bos_token_ids + token_ids_0
250
+
251
+ if token_ids_1 is None:
252
+ return output
253
+
254
+ return output + bos_token_ids + token_ids_1
255
+
256
+ def _tokenize(self, text):
257
+ """Tokenize a string."""
258
+ bpe_tokens = []
259
+ for token in re.findall(self.pat, text):
260
+ token = "".join(
261
+ self.byte_encoder[b] for b in token.encode("utf-8")
262
+ ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
263
+ bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
264
+ return bpe_tokens
265
+
266
+ def _convert_token_to_id(self, token):
267
+ """Converts a token (str) into an id using the vocab."""
268
+ return self.encoder.get(token, self.encoder.get(self.unk_token))
269
+
270
+ def _convert_id_to_token(self, index):
271
+ """Converts an index (integer) into a token (str) using the vocab."""
272
+ return self.decoder.get(index)
273
+
274
+ def convert_tokens_to_string(self, tokens):
275
+ """Converts a sequence of tokens (string) into a single string."""
276
+ text = "".join(tokens)
277
+ text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
278
+ return text
279
+
280
+ def create_token_type_ids_from_sequences(
281
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
282
+ ) -> List[int]:
283
+ """
284
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. A sequence
285
+ pair mask has the following format:
286
+
287
+ ```
288
+ 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
289
+ | first sequence | second sequence |
290
+ ```
291
+
292
+ If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).
293
+
294
+ Args:
295
+ token_ids_0 (`List[int]`):
296
+ List of IDs.
297
+ token_ids_1 (`List[int]`, *optional*):
298
+ Optional second list of IDs for sequence pairs.
299
+
300
+ Returns:
301
+ `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
302
+ """
303
+ sep = [self.sep_token_id] if self.sep_token_id is not None else []
304
+ cls = [self.cls_token_id] if self.cls_token_id is not None else []
305
+ if token_ids_1 is None:
306
+ return len(cls + token_ids_0 + sep) * [0]
307
+ return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
308
+
309
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
310
+ if not os.path.isdir(save_directory):
311
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
312
+ return
313
+ vocab_file = os.path.join(
314
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
315
+ )
316
+ merge_file = os.path.join(
317
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
318
+ )
319
+
320
+ with open(vocab_file, "w", encoding="utf-8") as f:
321
+ f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
322
+
323
+ index = 0
324
+ with open(merge_file, "w", encoding="utf-8") as writer:
325
+ writer.write("#version: 0.2\n")
326
+ for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
327
+ if index != token_index:
328
+ logger.warning(
329
+ f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
330
+ " Please check that the tokenizer is not corrupted!"
331
+ )
332
+ index = token_index
333
+ writer.write(" ".join(bpe_tokens) + "\n")
334
+ index += 1
335
+
336
+ return vocab_file, merge_file
337
+
338
+ def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
339
+ add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
340
+ if is_split_into_words or add_prefix_space:
341
+ text = " " + text
342
+ return (text, kwargs)
343
+
344
+ def decode(
345
+ self,
346
+ token_ids: Union[int, List[int], "np.ndarray", "torch.Tensor", "tf.Tensor"],
347
+ skip_special_tokens: bool = False,
348
+ clean_up_tokenization_spaces: bool = None,
349
+ truncate_before_pattern: Optional[List[str]] = None,
350
+ **kwargs,
351
+ ) -> str:
352
+ """
353
+ Converts a sequence of ids into a string, using the tokenizer and vocabulary, with options to remove special
354
+ tokens and clean up tokenization spaces.
355
+
356
+ Similar to doing `self.convert_tokens_to_string(self.convert_ids_to_tokens(token_ids))`.
357
+
358
+ Args:
359
+ token_ids (`Union[int, List[int], np.ndarray, torch.Tensor, tf.Tensor]`):
360
+ List of tokenized input ids. Can be obtained using the `__call__` method.
361
+ skip_special_tokens (`bool`, *optional*, defaults to `False`):
362
+ Whether or not to remove special tokens in the decoding.
363
+ clean_up_tokenization_spaces (`bool`, *optional*):
364
+ Whether or not to clean up the tokenization spaces. If `None`, will default to
365
+ `self.clean_up_tokenization_spaces` (available in the `tokenizer_config`).
366
+ truncate_before_pattern (`List[str]`, *optional*, defaults to `None`):
367
+ A list of regular expression strings that will be used to truncate the returned string. This can be
368
+ used to remove extra pieces of code (e.g. truncate if observing a comment symbol "#" at the beginning
369
+ of a new line). An example pattern could be `["^#", re.escape("<|endoftext|>"), "^'''", "\n\n\n"]`.
370
+ kwargs (additional keyword arguments, *optional*):
371
+ Will be passed to the underlying model specific decode method.
372
+
373
+ Returns:
374
+ `str`: The decoded sentence.
375
+ """
376
+
377
+ token_ids = to_py_obj(token_ids)
378
+
379
+ decoded_text = super()._decode(
380
+ token_ids=token_ids,
381
+ skip_special_tokens=skip_special_tokens,
382
+ clean_up_tokenization_spaces=clean_up_tokenization_spaces,
383
+ **kwargs,
384
+ )
385
+
386
+ if truncate_before_pattern is not None and len(truncate_before_pattern) > 0:
387
+ decoded_text = self.truncate(decoded_text, truncate_before_pattern)
388
+
389
+ return decoded_text
390
+
391
+ def truncate(self, completion, truncate_before_pattern):
392
+ def find_re(string, pattern, start_pos):
393
+ m = pattern.search(string, start_pos)
394
+ return m.start() if m else -1
395
+
396
+ terminals = [re.compile(pattern, re.MULTILINE) for pattern in truncate_before_pattern]
397
+
398
+ prints = list(re.finditer("^print", completion, re.MULTILINE))
399
+
400
+ if len(prints) > 1:
401
+ completion = completion[: prints[1].start()]
402
+
403
+ defs = list(re.finditer("^def", completion, re.MULTILINE))
404
+
405
+ if len(defs) > 1:
406
+ completion = completion[: defs[1].start()]
407
+
408
+ start_pos = 0
409
+
410
+ terminals_pos = [
411
+ pos for pos in [find_re(completion, terminal, start_pos) for terminal in terminals] if pos != -1
412
+ ]
413
+
414
+ if len(terminals_pos) > 0:
415
+ return completion[: min(terminals_pos)]
416
+ else:
417
+ return completion
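A short, hedged usage sketch of the `decode`/`truncate` pair defined above; the checkpoint name is an assumption and the truncation patterns mirror the docstring example.

```python
# Sketch: cutting a decoded completion at the first occurrence of any truncation pattern.
import re

from transformers import CodeGenTokenizer

tokenizer = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
ids = tokenizer("def add(a, b):\n    return a + b\n\n\n# trailing comment")["input_ids"]

# Without truncation the trailing comment survives; with truncate_before_pattern the decoded
# string is cut at the earliest match (here: a comment line, the EOS marker, or a triple newline).
patterns = ["^#", re.escape("<|endoftext|>"), "\n\n\n"]
print(tokenizer.decode(ids, truncate_before_pattern=patterns))
```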
llmeval-env/lib/python3.10/site-packages/transformers/models/codegen/tokenization_codegen_fast.py ADDED
@@ -0,0 +1,273 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The Salesforce authors, The Open AI Team Authors and The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Tokenization classes for CodeGen."""
16
+
17
+
18
+ import json
19
+ import re
20
+ from typing import TYPE_CHECKING, List, Optional, Tuple, Union
21
+
22
+ import numpy as np
23
+
24
+ from ...utils import is_tf_available, is_torch_available, logging
25
+
26
+
27
+ if TYPE_CHECKING:
28
+ if is_torch_available():
29
+ import torch
30
+ if is_tf_available():
31
+ import tensorflow as tf
32
+
33
+ from tokenizers import pre_tokenizers
34
+
35
+ from ...tokenization_utils_base import BatchEncoding
36
+ from ...tokenization_utils_fast import PreTrainedTokenizerFast
37
+ from .tokenization_codegen import CodeGenTokenizer
38
+
39
+
40
+ logger = logging.get_logger(__name__)
41
+
42
+ VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
43
+
44
+
45
+ class CodeGenTokenizerFast(PreTrainedTokenizerFast):
46
+ """
47
+ Construct a "fast" CodeGen tokenizer (backed by HuggingFace's *tokenizers* library). Based on byte-level
48
+ Byte-Pair-Encoding.
49
+
50
+ This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will
51
+ be encoded differently depending on whether it is at the beginning of the sentence (without space) or not:
52
+
53
+ ```python
54
+ >>> from transformers import CodeGenTokenizerFast
55
+
56
+ >>> tokenizer = CodeGenTokenizerFast.from_pretrained("Salesforce/codegen-350M-mono")
57
+ >>> tokenizer("Hello world")["input_ids"]
58
+ [15496, 995]
59
+
60
+ >>> tokenizer(" Hello world")["input_ids"]
61
+ [18435, 995]
62
+ ```
63
+
64
+ You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer, but since
65
+ the model was not pretrained this way, it might yield a decrease in performance.
66
+
67
+ <Tip>
68
+
69
+ When used with `is_split_into_words=True`, this tokenizer needs to be instantiated with `add_prefix_space=True`.
70
+
71
+ </Tip>
72
+
73
+ This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
74
+ refer to this superclass for more information regarding those methods.
75
+
76
+ Args:
77
+ vocab_file (`str`, *optional*):
78
+ Path to the vocabulary file.
79
+ merges_file (`str`, *optional*):
80
+ Path to the merges file.
81
+ tokenizer_file (`str`, *optional*):
82
+ Path to [tokenizers](https://github.com/huggingface/tokenizers) file (generally has a .json extension) that
83
+ contains everything needed to load the tokenizer.
84
+ unk_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
85
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
86
+ token instead.
87
+ bos_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
88
+ The beginning of sequence token.
89
+ eos_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
90
+ The end of sequence token.
91
+ add_prefix_space (`bool`, *optional*, defaults to `False`):
92
+ Whether or not to add an initial space to the input. This allows treating the leading word just like any
93
+ other word. (The CodeGen tokenizer detects the beginning of words by the preceding space.)
94
+ return_token_type_ids (`bool`, *optional*, defaults to `False`):
95
+ Whether to return token type IDs.
96
+ """
97
+
98
+ vocab_files_names = VOCAB_FILES_NAMES
99
+ model_input_names = ["input_ids", "attention_mask"]
100
+ slow_tokenizer_class = CodeGenTokenizer
101
+
102
+ def __init__(
103
+ self,
104
+ vocab_file=None,
105
+ merges_file=None,
106
+ tokenizer_file=None,
107
+ unk_token="<|endoftext|>",
108
+ bos_token="<|endoftext|>",
109
+ eos_token="<|endoftext|>",
110
+ add_prefix_space=False,
111
+ return_token_type_ids=False,
112
+ **kwargs,
113
+ ):
114
+ self.return_token_type_ids = return_token_type_ids
115
+ if self.return_token_type_ids:
116
+ self.model_input_names.append("token_type_ids")
117
+
118
+ super().__init__(
119
+ vocab_file,
120
+ merges_file,
121
+ tokenizer_file=tokenizer_file,
122
+ unk_token=unk_token,
123
+ bos_token=bos_token,
124
+ eos_token=eos_token,
125
+ add_prefix_space=add_prefix_space,
126
+ return_token_type_ids=return_token_type_ids,
127
+ **kwargs,
128
+ )
129
+
130
+ if kwargs.pop("add_bos_token", False):
131
+ model_id = kwargs.pop("name_or_path", "")
132
+ raise ValueError(
133
+ "Currently GPT2's fast tokenizer does NOT support adding a BOS token. "
134
+ "Instead you should use GPT2's slow tokenizer class `CodeGenTokenizer` as follows: \n"
135
+ f"`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n"
136
+ f"`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n"
137
+ "This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005."
138
+ " so that the fast tokenizer works correctly."
139
+ )
140
+
141
+ pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
142
+ if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
143
+ pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
144
+ pre_tok_state["add_prefix_space"] = add_prefix_space
145
+ self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
146
+
147
+ self.add_prefix_space = add_prefix_space
148
+
149
+ def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
150
+ is_split_into_words = kwargs.get("is_split_into_words", False)
151
+ assert self.add_prefix_space or not is_split_into_words, (
152
+ f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
153
+ "to use it with pretokenized inputs."
154
+ )
155
+
156
+ return super()._batch_encode_plus(*args, **kwargs)
157
+
158
+ def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
159
+ is_split_into_words = kwargs.get("is_split_into_words", False)
160
+
161
+ assert self.add_prefix_space or not is_split_into_words, (
162
+ f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
163
+ "to use it with pretokenized inputs."
164
+ )
165
+
166
+ return super()._encode_plus(*args, **kwargs)
167
+
168
+ # Copied from transformers.models.codegen.tokenization_codegen.CodeGenTokenizer.create_token_type_ids_from_sequences
169
+ def create_token_type_ids_from_sequences(
170
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
171
+ ) -> List[int]:
172
+ """
173
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. A sequence
174
+ pair mask has the following format:
175
+
176
+ ```
177
+ 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
178
+ | first sequence | second sequence |
179
+ ```
180
+
181
+ If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).
182
+
183
+ Args:
184
+ token_ids_0 (`List[int]`):
185
+ List of IDs.
186
+ token_ids_1 (`List[int]`, *optional*):
187
+ Optional second list of IDs for sequence pairs.
188
+
189
+ Returns:
190
+ `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
191
+ """
192
+ sep = [self.sep_token_id] if self.sep_token_id is not None else []
193
+ cls = [self.cls_token_id] if self.cls_token_id is not None else []
194
+ if token_ids_1 is None:
195
+ return len(cls + token_ids_0 + sep) * [0]
196
+ return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
197
+
198
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
199
+ files = self._tokenizer.model.save(save_directory, name=filename_prefix)
200
+ return tuple(files)
201
+
202
+ def decode(
203
+ self,
204
+ token_ids: Union[int, List[int], "np.ndarray", "torch.Tensor", "tf.Tensor"],
205
+ skip_special_tokens: bool = False,
206
+ clean_up_tokenization_spaces: bool = None,
207
+ truncate_before_pattern: Optional[List[str]] = None,
208
+ **kwargs,
209
+ ) -> str:
210
+ """
211
+ Converts a sequence of ids into a string, using the tokenizer and vocabulary, with options to remove special
212
+ tokens and clean up tokenization spaces.
213
+
214
+ Similar to doing `self.convert_tokens_to_string(self.convert_ids_to_tokens(token_ids))`.
215
+
216
+ Args:
217
+ token_ids (`Union[int, List[int], np.ndarray, torch.Tensor, tf.Tensor]`):
218
+ List of tokenized input ids. Can be obtained using the `__call__` method.
219
+ skip_special_tokens (`bool`, *optional*, defaults to `False`):
220
+ Whether or not to remove special tokens in the decoding.
221
+ clean_up_tokenization_spaces (`bool`, *optional*):
222
+ Whether or not to clean up the tokenization spaces. If `None`, will default to
223
+ `self.clean_up_tokenization_spaces` (available in the `tokenizer_config`).
224
+ truncate_before_pattern (`List[str]`, *optional*, defaults to `None`):
225
+ A list of regular expression strings that will be used to truncate the returned string. This can be
226
+ used to remove extra pieces of code (e.g. truncate if observing a comment symbol "#" at the beginning
227
+ of a new line). An example pattern could be `["^#", re.escape("<|endoftext|>"), "^'''", "\n\n\n"]`.
228
+ kwargs (additional keyword arguments, *optional*):
229
+ Will be passed to the underlying model specific decode method.
230
+
231
+ Returns:
232
+ `str`: The decoded sentence.
233
+ """
234
+
235
+ decoded_text = super().decode(
236
+ token_ids=token_ids,
237
+ skip_special_tokens=skip_special_tokens,
238
+ clean_up_tokenization_spaces=clean_up_tokenization_spaces,
239
+ **kwargs,
240
+ )
241
+
242
+ if truncate_before_pattern is not None and len(truncate_before_pattern) > 0:
243
+ decoded_text = self.truncate(decoded_text, truncate_before_pattern)
244
+
245
+ return decoded_text
246
+
247
+ def truncate(self, completion, truncate_before_pattern):
248
+ def find_re(string, pattern, start_pos):
249
+ m = pattern.search(string, start_pos)
250
+ return m.start() if m else -1
251
+
252
+ terminals = [re.compile(pattern, re.MULTILINE) for pattern in truncate_before_pattern]
253
+
254
+ prints = list(re.finditer("^print", completion, re.MULTILINE))
255
+
256
+ if len(prints) > 1:
257
+ completion = completion[: prints[1].start()]
258
+
259
+ defs = list(re.finditer("^def", completion, re.MULTILINE))
260
+
261
+ if len(defs) > 1:
262
+ completion = completion[: defs[1].start()]
263
+
264
+ start_pos = 0
265
+
266
+ terminals_pos = [
267
+ pos for pos in [find_re(completion, terminal, start_pos) for terminal in terminals] if pos != -1
268
+ ]
269
+
270
+ if len(terminals_pos) > 0:
271
+ return completion[: min(terminals_pos)]
272
+ else:
273
+ return completion
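A corresponding sketch for the fast tokenizer: pretokenized inputs require `add_prefix_space=True`, otherwise the `_encode_plus`/`_batch_encode_plus` assertions above fail. The checkpoint name is again an assumption.

```python
# Sketch: the fast tokenizer with pretokenized (already split) input.
from transformers import CodeGenTokenizerFast

tokenizer = CodeGenTokenizerFast.from_pretrained(
    "Salesforce/codegen-350M-mono", add_prefix_space=True
)
encoding = tokenizer(["def", "foo", "(", ")", ":"], is_split_into_words=True)
print(encoding["input_ids"])
```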
llmeval-env/lib/python3.10/site-packages/transformers/models/distilbert/configuration_distilbert.py ADDED
@@ -0,0 +1,140 @@
1
+ # coding=utf-8
2
+ # Copyright 2019-present, the HuggingFace Inc. team, The Google AI Language Team and Facebook, Inc.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ DistilBERT model configuration"""
16
+ from collections import OrderedDict
17
+ from typing import Mapping
18
+
19
+ from ...configuration_utils import PretrainedConfig
20
+ from ...onnx import OnnxConfig
21
+ from ...utils import logging
22
+
23
+
24
+ logger = logging.get_logger(__name__)
25
+
26
+
27
+ from ..deprecated._archive_maps import DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
28
+
29
+
30
+ class DistilBertConfig(PretrainedConfig):
31
+ r"""
32
+ This is the configuration class to store the configuration of a [`DistilBertModel`] or a [`TFDistilBertModel`]. It
33
+ is used to instantiate a DistilBERT model according to the specified arguments, defining the model architecture.
34
+ Instantiating a configuration with the defaults will yield a similar configuration to that of the DistilBERT
35
+ [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) architecture.
36
+
37
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
38
+ documentation from [`PretrainedConfig`] for more information.
39
+
40
+ Args:
41
+ vocab_size (`int`, *optional*, defaults to 30522):
42
+ Vocabulary size of the DistilBERT model. Defines the number of different tokens that can be represented by
43
+ the `inputs_ids` passed when calling [`DistilBertModel`] or [`TFDistilBertModel`].
44
+ max_position_embeddings (`int`, *optional*, defaults to 512):
45
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
46
+ just in case (e.g., 512 or 1024 or 2048).
47
+ sinusoidal_pos_embds (`boolean`, *optional*, defaults to `False`):
48
+ Whether to use sinusoidal positional embeddings.
49
+ n_layers (`int`, *optional*, defaults to 6):
50
+ Number of hidden layers in the Transformer encoder.
51
+ n_heads (`int`, *optional*, defaults to 12):
52
+ Number of attention heads for each attention layer in the Transformer encoder.
53
+ dim (`int`, *optional*, defaults to 768):
54
+ Dimensionality of the encoder layers and the pooler layer.
55
+ hidden_dim (`int`, *optional*, defaults to 3072):
56
+ The size of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
57
+ dropout (`float`, *optional*, defaults to 0.1):
58
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
59
+ attention_dropout (`float`, *optional*, defaults to 0.1):
60
+ The dropout ratio for the attention probabilities.
61
+ activation (`str` or `Callable`, *optional*, defaults to `"gelu"`):
62
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
63
+ `"relu"`, `"silu"` and `"gelu_new"` are supported.
64
+ initializer_range (`float`, *optional*, defaults to 0.02):
65
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
66
+ qa_dropout (`float`, *optional*, defaults to 0.1):
67
+ The dropout probabilities used in the question answering model [`DistilBertForQuestionAnswering`].
68
+ seq_classif_dropout (`float`, *optional*, defaults to 0.2):
69
+ The dropout probabilities used in the sequence classification and the multiple choice model
70
+ [`DistilBertForSequenceClassification`].
71
+
72
+ Examples:
73
+
74
+ ```python
75
+ >>> from transformers import DistilBertConfig, DistilBertModel
76
+
77
+ >>> # Initializing a DistilBERT configuration
78
+ >>> configuration = DistilBertConfig()
79
+
80
+ >>> # Initializing a model (with random weights) from the configuration
81
+ >>> model = DistilBertModel(configuration)
82
+
83
+ >>> # Accessing the model configuration
84
+ >>> configuration = model.config
85
+ ```"""
86
+
87
+ model_type = "distilbert"
88
+ attribute_map = {
89
+ "hidden_size": "dim",
90
+ "num_attention_heads": "n_heads",
91
+ "num_hidden_layers": "n_layers",
92
+ }
93
+
94
+ def __init__(
95
+ self,
96
+ vocab_size=30522,
97
+ max_position_embeddings=512,
98
+ sinusoidal_pos_embds=False,
99
+ n_layers=6,
100
+ n_heads=12,
101
+ dim=768,
102
+ hidden_dim=4 * 768,
103
+ dropout=0.1,
104
+ attention_dropout=0.1,
105
+ activation="gelu",
106
+ initializer_range=0.02,
107
+ qa_dropout=0.1,
108
+ seq_classif_dropout=0.2,
109
+ pad_token_id=0,
110
+ **kwargs,
111
+ ):
112
+ self.vocab_size = vocab_size
113
+ self.max_position_embeddings = max_position_embeddings
114
+ self.sinusoidal_pos_embds = sinusoidal_pos_embds
115
+ self.n_layers = n_layers
116
+ self.n_heads = n_heads
117
+ self.dim = dim
118
+ self.hidden_dim = hidden_dim
119
+ self.dropout = dropout
120
+ self.attention_dropout = attention_dropout
121
+ self.activation = activation
122
+ self.initializer_range = initializer_range
123
+ self.qa_dropout = qa_dropout
124
+ self.seq_classif_dropout = seq_classif_dropout
125
+ super().__init__(**kwargs, pad_token_id=pad_token_id)
126
+
127
+
128
+ class DistilBertOnnxConfig(OnnxConfig):
129
+ @property
130
+ def inputs(self) -> Mapping[str, Mapping[int, str]]:
131
+ if self.task == "multiple-choice":
132
+ dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
133
+ else:
134
+ dynamic_axis = {0: "batch", 1: "sequence"}
135
+ return OrderedDict(
136
+ [
137
+ ("input_ids", dynamic_axis),
138
+ ("attention_mask", dynamic_axis),
139
+ ]
140
+ )
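A small sketch of how the `attribute_map` aliases and the `DistilBertOnnxConfig` defined above behave; it relies only on the defaults declared in this file.

```python
# Sketch: BERT-style attribute names resolve to DistilBERT's own fields via attribute_map,
# and DistilBertOnnxConfig exposes the dynamic axes used for ONNX export.
from transformers import DistilBertConfig
from transformers.models.distilbert.configuration_distilbert import DistilBertOnnxConfig

config = DistilBertConfig()
assert config.hidden_size == config.dim == 768            # "hidden_size" -> "dim"
assert config.num_hidden_layers == config.n_layers == 6   # "num_hidden_layers" -> "n_layers"

onnx_config = DistilBertOnnxConfig(config)
# OrderedDict with batch/sequence dynamic axes for input_ids and attention_mask
print(onnx_config.inputs)
```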
llmeval-env/lib/python3.10/site-packages/transformers/models/distilbert/modeling_distilbert.py ADDED
@@ -0,0 +1,1384 @@
1
+ # coding=utf-8
2
+ # Copyright 2019-present, the HuggingFace Inc. team, The Google AI Language Team and Facebook, Inc.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ """
17
+ PyTorch DistilBERT model adapted in part from Facebook, Inc XLM model (https://github.com/facebookresearch/XLM) and in
18
+ part from HuggingFace PyTorch version of Google AI Bert model (https://github.com/google-research/bert)
19
+ """
20
+
21
+
22
+ import math
23
+ from typing import Dict, List, Optional, Set, Tuple, Union
24
+
25
+ import numpy as np
26
+ import torch
27
+ import torch.nn.functional as F
28
+ from torch import nn
29
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
30
+
31
+ from ...activations import get_activation
32
+ from ...configuration_utils import PretrainedConfig
33
+ from ...integrations.deepspeed import is_deepspeed_zero3_enabled
34
+ from ...modeling_outputs import (
35
+ BaseModelOutput,
36
+ MaskedLMOutput,
37
+ MultipleChoiceModelOutput,
38
+ QuestionAnsweringModelOutput,
39
+ SequenceClassifierOutput,
40
+ TokenClassifierOutput,
41
+ )
42
+ from ...modeling_utils import PreTrainedModel
43
+ from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
44
+ from ...utils import (
45
+ add_code_sample_docstrings,
46
+ add_start_docstrings,
47
+ add_start_docstrings_to_model_forward,
48
+ is_flash_attn_2_available,
49
+ is_flash_attn_greater_or_equal_2_10,
50
+ logging,
51
+ replace_return_docstrings,
52
+ )
53
+ from .configuration_distilbert import DistilBertConfig
54
+
55
+
56
+ if is_flash_attn_2_available():
57
+ from flash_attn import flash_attn_func, flash_attn_varlen_func
58
+ from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input # noqa
59
+
60
+
61
+ logger = logging.get_logger(__name__)
62
+ _CHECKPOINT_FOR_DOC = "distilbert-base-uncased"
63
+ _CONFIG_FOR_DOC = "DistilBertConfig"
64
+
65
+
66
+ from ..deprecated._archive_maps import DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
67
+
68
+
69
+ # UTILS AND BUILDING BLOCKS OF THE ARCHITECTURE #
70
+
71
+
72
+ # Copied from transformers.models.llama.modeling_llama._get_unpad_data
73
+ def _get_unpad_data(attention_mask):
74
+ seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
75
+ indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
76
+ max_seqlen_in_batch = seqlens_in_batch.max().item()
77
+ cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
78
+ return (
79
+ indices,
80
+ cu_seqlens,
81
+ max_seqlen_in_batch,
82
+ )
83
+
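+ # Editor's note: a minimal, hypothetical sketch (not part of the upstream file) showing what
+ # `_get_unpad_data` produces for a small padded batch. The helper name is invented for illustration.
+ def _example_get_unpad_data():
+     # Two sequences of length 4; the second one ends with a single padding position.
+     mask = torch.tensor([[1, 1, 1, 1], [1, 1, 1, 0]])
+     indices, cu_seqlens, max_seqlen = _get_unpad_data(mask)
+     # indices     -> flattened positions of real tokens: tensor([0, 1, 2, 3, 4, 5, 6])
+     # cu_seqlens  -> cumulative sequence lengths with a leading 0: tensor([0, 4, 7])
+     # max_seqlen  -> 4 (longest unpadded sequence in the batch)
+     return indices, cu_seqlens, max_seqlen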
84
+
85
+ def create_sinusoidal_embeddings(n_pos: int, dim: int, out: torch.Tensor):
86
+ if is_deepspeed_zero3_enabled():
87
+ import deepspeed
88
+
89
+ with deepspeed.zero.GatheredParameters(out, modifier_rank=0):
90
+ if torch.distributed.get_rank() == 0:
91
+ _create_sinusoidal_embeddings(n_pos=n_pos, dim=dim, out=out)
92
+ else:
93
+ _create_sinusoidal_embeddings(n_pos=n_pos, dim=dim, out=out)
94
+
95
+
96
+ def _create_sinusoidal_embeddings(n_pos: int, dim: int, out: torch.Tensor):
97
+ position_enc = np.array([[pos / np.power(10000, 2 * (j // 2) / dim) for j in range(dim)] for pos in range(n_pos)])
98
+ out.requires_grad = False
99
+ out[:, 0::2] = torch.FloatTensor(np.sin(position_enc[:, 0::2]))
100
+ out[:, 1::2] = torch.FloatTensor(np.cos(position_enc[:, 1::2]))
101
+ out.detach_()
102
+
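+ # Editor's note: an illustrative, hypothetical helper (not part of the upstream file) showing the
+ # table built by `_create_sinusoidal_embeddings`: even feature indices hold sin(pos / 10000^(2i/dim))
+ # and the following odd indices hold the matching cos term; the result never receives gradients.
+ def _example_sinusoidal_table(n_pos: int = 4, dim: int = 6) -> torch.Tensor:
+     table = torch.empty(n_pos, dim)
+     _create_sinusoidal_embeddings(n_pos=n_pos, dim=dim, out=table)
+     # table[:, 0::2] are the sine components, table[:, 1::2] the cosine components.
+     return table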
103
+
104
+ class Embeddings(nn.Module):
105
+ def __init__(self, config: PretrainedConfig):
106
+ super().__init__()
107
+ self.word_embeddings = nn.Embedding(config.vocab_size, config.dim, padding_idx=config.pad_token_id)
108
+ self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.dim)
109
+
110
+ self.LayerNorm = nn.LayerNorm(config.dim, eps=1e-12)
111
+ self.dropout = nn.Dropout(config.dropout)
112
+ self.register_buffer(
113
+ "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
114
+ )
115
+
116
+ def forward(self, input_ids: torch.Tensor, input_embeds: Optional[torch.Tensor] = None) -> torch.Tensor:
117
+ """
118
+ Parameters:
119
+ input_ids (torch.Tensor):
120
+ torch.tensor(bs, max_seq_length) The token ids to embed.
121
+ input_embeds (*optional*, torch.Tensor):
122
+ The pre-computed word embeddings. Can only be passed if the input ids are `None`.
123
+
124
+
125
+ Returns: torch.tensor(bs, max_seq_length, dim) The embedded tokens (plus position embeddings, no token_type
126
+ embeddings)
127
+ """
128
+ if input_ids is not None:
129
+ input_embeds = self.word_embeddings(input_ids) # (bs, max_seq_length, dim)
130
+
131
+ seq_length = input_embeds.size(1)
132
+
133
+ # Using the position_ids buffer registered in the constructor helps
134
+ # when tracing the model without passing position_ids, and avoids
135
+ # issues similar to issue #5664
136
+ if hasattr(self, "position_ids"):
137
+ position_ids = self.position_ids[:, :seq_length]
138
+ else:
139
+ position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device) # (max_seq_length)
140
+ position_ids = position_ids.unsqueeze(0).expand_as(input_ids) # (bs, max_seq_length)
141
+
142
+ position_embeddings = self.position_embeddings(position_ids) # (bs, max_seq_length, dim)
143
+
144
+ embeddings = input_embeds + position_embeddings # (bs, max_seq_length, dim)
145
+ embeddings = self.LayerNorm(embeddings) # (bs, max_seq_length, dim)
146
+ embeddings = self.dropout(embeddings) # (bs, max_seq_length, dim)
147
+ return embeddings
148
+
149
+
150
+ class MultiHeadSelfAttention(nn.Module):
151
+ def __init__(self, config: PretrainedConfig):
152
+ super().__init__()
153
+ self.config = config
154
+
155
+ self.n_heads = config.n_heads
156
+ self.dim = config.dim
157
+ self.dropout = nn.Dropout(p=config.attention_dropout)
158
+ self.is_causal = False
159
+
160
+ # The number of attention heads must evenly divide the hidden dimension
161
+ if self.dim % self.n_heads != 0:
162
+ # Raise a ValueError if the heads do not evenly divide the dimension
163
+ raise ValueError(f"self.n_heads: {self.n_heads} must divide self.dim: {self.dim} evenly")
164
+
165
+ self.q_lin = nn.Linear(in_features=config.dim, out_features=config.dim)
166
+ self.k_lin = nn.Linear(in_features=config.dim, out_features=config.dim)
167
+ self.v_lin = nn.Linear(in_features=config.dim, out_features=config.dim)
168
+ self.out_lin = nn.Linear(in_features=config.dim, out_features=config.dim)
169
+
170
+ self.pruned_heads: Set[int] = set()
171
+ self.attention_head_size = self.dim // self.n_heads
172
+
173
+ def prune_heads(self, heads: List[int]):
174
+ if len(heads) == 0:
175
+ return
176
+ heads, index = find_pruneable_heads_and_indices(
177
+ heads, self.n_heads, self.attention_head_size, self.pruned_heads
178
+ )
179
+ # Prune linear layers
180
+ self.q_lin = prune_linear_layer(self.q_lin, index)
181
+ self.k_lin = prune_linear_layer(self.k_lin, index)
182
+ self.v_lin = prune_linear_layer(self.v_lin, index)
183
+ self.out_lin = prune_linear_layer(self.out_lin, index, dim=1)
184
+ # Update hyper params
185
+ self.n_heads = self.n_heads - len(heads)
186
+ self.dim = self.attention_head_size * self.n_heads
187
+ self.pruned_heads = self.pruned_heads.union(heads)
188
+
189
+ def forward(
190
+ self,
191
+ query: torch.Tensor,
192
+ key: torch.Tensor,
193
+ value: torch.Tensor,
194
+ mask: torch.Tensor,
195
+ head_mask: Optional[torch.Tensor] = None,
196
+ output_attentions: bool = False,
197
+ ) -> Tuple[torch.Tensor, ...]:
198
+ """
199
+ Parameters:
200
+ query: torch.tensor(bs, seq_length, dim)
201
+ key: torch.tensor(bs, seq_length, dim)
202
+ value: torch.tensor(bs, seq_length, dim)
203
+ mask: torch.tensor(bs, seq_length)
204
+
205
+ Returns:
206
+ context: torch.tensor(bs, seq_length, dim) The contextualized layer.
207
+ weights: torch.tensor(bs, n_heads, seq_length, seq_length) The attention weights; only returned if `output_attentions=True`.
208
+ """
209
+ bs, q_length, dim = query.size()
210
+ k_length = key.size(1)
211
+ # assert dim == self.dim, f'Dimensions do not match: {dim} input vs {self.dim} configured'
212
+ # assert key.size() == value.size()
213
+
214
+ dim_per_head = self.dim // self.n_heads
215
+
216
+ mask_reshp = (bs, 1, 1, k_length)
217
+
218
+ def shape(x: torch.Tensor) -> torch.Tensor:
219
+ """separate heads"""
220
+ return x.view(bs, -1, self.n_heads, dim_per_head).transpose(1, 2)
221
+
222
+ def unshape(x: torch.Tensor) -> torch.Tensor:
223
+ """group heads"""
224
+ return x.transpose(1, 2).contiguous().view(bs, -1, self.n_heads * dim_per_head)
225
+
226
+ q = shape(self.q_lin(query)) # (bs, n_heads, q_length, dim_per_head)
227
+ k = shape(self.k_lin(key)) # (bs, n_heads, k_length, dim_per_head)
228
+ v = shape(self.v_lin(value)) # (bs, n_heads, k_length, dim_per_head)
229
+
230
+ q = q / math.sqrt(dim_per_head) # (bs, n_heads, q_length, dim_per_head)
231
+ scores = torch.matmul(q, k.transpose(2, 3)) # (bs, n_heads, q_length, k_length)
232
+ mask = (mask == 0).view(mask_reshp).expand_as(scores) # (bs, n_heads, q_length, k_length)
233
+ scores = scores.masked_fill(
234
+ mask, torch.tensor(torch.finfo(scores.dtype).min)
235
+ ) # (bs, n_heads, q_length, k_length)
236
+
237
+ weights = nn.functional.softmax(scores, dim=-1) # (bs, n_heads, q_length, k_length)
238
+ weights = self.dropout(weights) # (bs, n_heads, q_length, k_length)
239
+
240
+ # Mask heads if we want to
241
+ if head_mask is not None:
242
+ weights = weights * head_mask
243
+
244
+ context = torch.matmul(weights, v) # (bs, n_heads, q_length, dim_per_head)
245
+ context = unshape(context) # (bs, q_length, dim)
246
+ context = self.out_lin(context) # (bs, q_length, dim)
247
+
248
+ if output_attentions:
249
+ return (context, weights)
250
+ else:
251
+ return (context,)
252
+
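+ # Editor's note: a minimal, hypothetical usage sketch (not part of the upstream file) for the eager
+ # attention above: self-attention where query/key/value are the same tensor and `mask` marks real
+ # tokens with 1 and padding with 0. The tiny config values are invented for illustration.
+ def _example_eager_self_attention() -> torch.Tensor:
+     config = DistilBertConfig(dim=8, n_heads=2, attention_dropout=0.0)
+     attention = MultiHeadSelfAttention(config)
+     hidden = torch.randn(2, 5, config.dim)  # (bs, seq_length, dim)
+     mask = torch.tensor([[1, 1, 1, 1, 1], [1, 1, 1, 0, 0]])  # (bs, seq_length)
+     (context,) = attention(query=hidden, key=hidden, value=hidden, mask=mask)
+     return context  # (bs, seq_length, dim)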
253
+
254
+ class DistilBertFlashAttention2(MultiHeadSelfAttention):
255
+ """
256
+ DistilBert flash attention module. This module inherits from `MultiHeadSelfAttention` as the weights of the module
257
+ stay untouched. The only required change is in the forward pass, which needs to correctly call the public
258
+ API of flash attention and handle padding tokens if the input contains any.
259
+ """
260
+
261
+ # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2.__init__
262
+ def __init__(self, *args, **kwargs):
263
+ super().__init__(*args, **kwargs)
264
+
265
+ # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1.
266
+ # flash_attn<2.1 generates a top-left aligned causal mask, while what is needed here is bottom-right alignment, which became the default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0.
267
+ # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left).
268
+ self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()
269
+
270
+ def forward(
271
+ self,
272
+ query: torch.Tensor,
273
+ key: torch.Tensor,
274
+ value: torch.Tensor,
275
+ mask: torch.Tensor,
276
+ head_mask: Optional[torch.Tensor] = None,
277
+ output_attentions: bool = False,
278
+ ) -> Tuple[torch.Tensor, ...]:
279
+ """
280
+ Parameters:
281
+ query: torch.tensor(bs, seq_length, dim)
282
+ key: torch.tensor(bs, seq_length, dim)
283
+ value: torch.tensor(bs, seq_length, dim)
284
+ mask: torch.tensor(bs, seq_length)
285
+
286
+ Returns:
287
+ weights: torch.tensor(bs, n_heads, seq_length, seq_length) Attention weights context: torch.tensor(bs,
288
+ seq_length, dim) Contextualized layer. Optional: only if `output_attentions=True`
289
+ """
290
+ batch_size, q_length, dim = query.size()
291
+
292
+ dim_per_head = self.dim // self.n_heads
293
+
294
+ def reshape(x: torch.Tensor) -> torch.Tensor:
295
+ """separate heads"""
296
+ return x.view(batch_size, -1, self.n_heads, dim_per_head)
297
+
298
+ # Flash attention requires the input to have the shape
299
+ # batch_size x seq_length x num_heads x head_dim
300
+ query_states = reshape(self.q_lin(query))
301
+ key_states = reshape(self.k_lin(key))
302
+ value_states = reshape(self.v_lin(value))
303
+
304
+ attn_dropout = self.config.attention_dropout if self.training else 0.0
305
+
306
+ # In PEFT, usually we cast the layer norms in float32 for training stability reasons
307
+ # therefore the input hidden states get silently cast to float32. Hence, we need to
308
+ # cast them back to the correct dtype just to be sure everything works as expected.
309
+ # This might slow down training & inference, so it is recommended not to cast the LayerNorms
310
+ # in fp32. (LlamaRMSNorm handles it correctly)
311
+
312
+ if query_states.dtype == torch.float32:
313
+ if torch.is_autocast_enabled():
314
+ target_dtype = torch.get_autocast_gpu_dtype()
315
+ # Handle the case where the model is quantized
316
+ elif hasattr(self.config, "_pre_quantization_dtype"):
317
+ target_dtype = self.config._pre_quantization_dtype
318
+ else:
319
+ target_dtype = self.q_lin.weight.dtype
320
+
321
+ logger.warning_once(
322
+ f"The input hidden states seem to be silently cast to float32; this might be related to"
323
+ f" the fact that you have upcast embedding or layer norm layers to float32. We will cast the input back to"
324
+ f" {target_dtype}."
325
+ )
326
+
327
+ query_states = query_states.to(target_dtype)
328
+ key_states = key_states.to(target_dtype)
329
+ value_states = value_states.to(target_dtype)
330
+
331
+ attn_weights = self._flash_attention_forward(
332
+ query_states, key_states, value_states, mask, q_length, dropout=attn_dropout
333
+ )
334
+
335
+ attn_weights_reshaped = attn_weights.reshape(batch_size, q_length, self.n_heads * dim_per_head)
336
+ attn_output = self.out_lin(attn_weights_reshaped)
337
+
338
+ if output_attentions:
339
+ return (attn_output, attn_weights)
340
+ else:
341
+ return (attn_output,)
342
+
343
+ # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2._flash_attention_forward with causal=True->causal=False
344
+ def _flash_attention_forward(
345
+ self, query_states, key_states, value_states, attention_mask, query_length, dropout=0.0, softmax_scale=None
346
+ ):
347
+ """
348
+ Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token,
349
+ the input is first unpadded, then the attention scores are computed, and the final output is padded back.
350
+
351
+ Args:
352
+ query_states (`torch.Tensor`):
353
+ Input query states to be passed to Flash Attention API
354
+ key_states (`torch.Tensor`):
355
+ Input key states to be passed to Flash Attention API
356
+ value_states (`torch.Tensor`):
357
+ Input value states to be passed to Flash Attention API
358
+ attention_mask (`torch.Tensor`):
359
+ The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the
360
+ position of padding tokens and 1 for the position of non-padding tokens.
361
+ dropout (`float`):
362
+ Attention dropout
363
+ softmax_scale (`float`, *optional*):
364
+ The scaling of QK^T before applying softmax. Defaults to `1 / sqrt(head_dim)`.
365
+ """
366
+ if not self._flash_attn_uses_top_left_mask:
367
+ causal = self.is_causal
368
+ else:
369
+ # TODO: Remove the `query_length != 1` check once Flash Attention for RoCm is bumped to 2.1. For details, please see the comment in LlamaFlashAttention2 __init__.
370
+ causal = self.is_causal and query_length != 1
371
+
372
+ # Contains at least one padding token in the sequence
373
+ if attention_mask is not None:
374
+ batch_size = query_states.shape[0]
375
+ query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input(
376
+ query_states, key_states, value_states, attention_mask, query_length
377
+ )
378
+
379
+ cu_seqlens_q, cu_seqlens_k = cu_seq_lens
380
+ max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens
381
+
382
+ attn_output_unpad = flash_attn_varlen_func(
383
+ query_states,
384
+ key_states,
385
+ value_states,
386
+ cu_seqlens_q=cu_seqlens_q,
387
+ cu_seqlens_k=cu_seqlens_k,
388
+ max_seqlen_q=max_seqlen_in_batch_q,
389
+ max_seqlen_k=max_seqlen_in_batch_k,
390
+ dropout_p=dropout,
391
+ softmax_scale=softmax_scale,
392
+ causal=causal,
393
+ )
394
+
395
+ attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length)
396
+ else:
397
+ attn_output = flash_attn_func(
398
+ query_states, key_states, value_states, dropout, softmax_scale=softmax_scale, causal=causal
399
+ )
400
+
401
+ return attn_output
402
+
403
+ # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2._upad_input with num_heads->n_heads
404
+ def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length):
405
+ indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask)
406
+ batch_size, kv_seq_len, num_key_value_heads, head_dim = key_layer.shape
407
+
408
+ key_layer = index_first_axis(
409
+ key_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
410
+ )
411
+ value_layer = index_first_axis(
412
+ value_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
413
+ )
414
+ if query_length == kv_seq_len:
415
+ query_layer = index_first_axis(
416
+ query_layer.reshape(batch_size * kv_seq_len, self.n_heads, head_dim), indices_k
417
+ )
418
+ cu_seqlens_q = cu_seqlens_k
419
+ max_seqlen_in_batch_q = max_seqlen_in_batch_k
420
+ indices_q = indices_k
421
+ elif query_length == 1:
422
+ max_seqlen_in_batch_q = 1
423
+ cu_seqlens_q = torch.arange(
424
+ batch_size + 1, dtype=torch.int32, device=query_layer.device
425
+ ) # There is a memcpy here, which is very bad.
426
+ indices_q = cu_seqlens_q[:-1]
427
+ query_layer = query_layer.squeeze(1)
428
+ else:
429
+ # The -q_len: slice assumes left padding.
430
+ attention_mask = attention_mask[:, -query_length:]
431
+ query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask)
432
+
433
+ return (
434
+ query_layer,
435
+ key_layer,
436
+ value_layer,
437
+ indices_q,
438
+ (cu_seqlens_q, cu_seqlens_k),
439
+ (max_seqlen_in_batch_q, max_seqlen_in_batch_k),
440
+ )
441
+
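+ # Editor's note: a hedged usage note (not part of the upstream file). Assuming this transformers
+ # version exposes the `attn_implementation` loading argument, the flash path above is selected with
+ # something like the following (it requires a CUDA device and half-precision weights):
+ #
+ #     model = DistilBertModel.from_pretrained(
+ #         "distilbert-base-uncased", torch_dtype=torch.float16, attn_implementation="flash_attention_2"
+ #     )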
442
+
443
+ class FFN(nn.Module):
444
+ def __init__(self, config: PretrainedConfig):
445
+ super().__init__()
446
+ self.dropout = nn.Dropout(p=config.dropout)
447
+ self.chunk_size_feed_forward = config.chunk_size_feed_forward
448
+ self.seq_len_dim = 1
449
+ self.lin1 = nn.Linear(in_features=config.dim, out_features=config.hidden_dim)
450
+ self.lin2 = nn.Linear(in_features=config.hidden_dim, out_features=config.dim)
451
+ self.activation = get_activation(config.activation)
452
+
453
+ def forward(self, input: torch.Tensor) -> torch.Tensor:
454
+ return apply_chunking_to_forward(self.ff_chunk, self.chunk_size_feed_forward, self.seq_len_dim, input)
455
+
456
+ def ff_chunk(self, input: torch.Tensor) -> torch.Tensor:
457
+ x = self.lin1(input)
458
+ x = self.activation(x)
459
+ x = self.lin2(x)
460
+ x = self.dropout(x)
461
+ return x
462
+
463
+
464
+ DISTILBERT_ATTENTION_CLASSES = {
465
+ "eager": MultiHeadSelfAttention,
466
+ "flash_attention_2": DistilBertFlashAttention2,
467
+ }
468
+
469
+
470
+ class TransformerBlock(nn.Module):
471
+ def __init__(self, config: PretrainedConfig):
472
+ super().__init__()
473
+
474
+ # The number of attention heads must evenly divide the hidden dimension
475
+ if config.dim % config.n_heads != 0:
476
+ raise ValueError(f"config.n_heads {config.n_heads} must divide config.dim {config.dim} evenly")
477
+
478
+ self.attention = DISTILBERT_ATTENTION_CLASSES[config._attn_implementation](config)
479
+ self.sa_layer_norm = nn.LayerNorm(normalized_shape=config.dim, eps=1e-12)
480
+
481
+ self.ffn = FFN(config)
482
+ self.output_layer_norm = nn.LayerNorm(normalized_shape=config.dim, eps=1e-12)
483
+
484
+ def forward(
485
+ self,
486
+ x: torch.Tensor,
487
+ attn_mask: Optional[torch.Tensor] = None,
488
+ head_mask: Optional[torch.Tensor] = None,
489
+ output_attentions: bool = False,
490
+ ) -> Tuple[torch.Tensor, ...]:
491
+ """
492
+ Parameters:
493
+ x: torch.tensor(bs, seq_length, dim)
494
+ attn_mask: torch.tensor(bs, seq_length)
495
+
496
+ Returns:
497
+ ffn_output: torch.tensor(bs, seq_length, dim) The output of the transformer block contextualization.
498
+ sa_weights: torch.tensor(bs, n_heads, seq_length, seq_length) The attention weights; only returned if `output_attentions=True`.
499
+ """
500
+ # Self-Attention
501
+ sa_output = self.attention(
502
+ query=x,
503
+ key=x,
504
+ value=x,
505
+ mask=attn_mask,
506
+ head_mask=head_mask,
507
+ output_attentions=output_attentions,
508
+ )
509
+ if output_attentions:
510
+ sa_output, sa_weights = sa_output # (bs, seq_length, dim), (bs, n_heads, seq_length, seq_length)
511
+ else: # To handle these `output_attentions` or `output_hidden_states` cases returning tuples
512
+ if type(sa_output) != tuple:
513
+ raise TypeError(f"sa_output must be a tuple but it is {type(sa_output)} type")
514
+
515
+ sa_output = sa_output[0]
516
+ sa_output = self.sa_layer_norm(sa_output + x) # (bs, seq_length, dim)
517
+
518
+ # Feed Forward Network
519
+ ffn_output = self.ffn(sa_output) # (bs, seq_length, dim)
520
+ ffn_output: torch.Tensor = self.output_layer_norm(ffn_output + sa_output) # (bs, seq_length, dim)
521
+
522
+ output = (ffn_output,)
523
+ if output_attentions:
524
+ output = (sa_weights,) + output
525
+ return output
526
+
527
+
528
+ class Transformer(nn.Module):
529
+ def __init__(self, config: PretrainedConfig):
530
+ super().__init__()
531
+ self.n_layers = config.n_layers
532
+ self.layer = nn.ModuleList([TransformerBlock(config) for _ in range(config.n_layers)])
533
+ self.gradient_checkpointing = False
534
+
535
+ def forward(
536
+ self,
537
+ x: torch.Tensor,
538
+ attn_mask: Optional[torch.Tensor] = None,
539
+ head_mask: Optional[torch.Tensor] = None,
540
+ output_attentions: bool = False,
541
+ output_hidden_states: bool = False,
542
+ return_dict: Optional[bool] = None,
543
+ ) -> Union[BaseModelOutput, Tuple[torch.Tensor, ...]]: # docstyle-ignore
544
+ """
545
+ Parameters:
546
+ x: torch.tensor(bs, seq_length, dim) Input sequence embedded.
547
+ attn_mask: torch.tensor(bs, seq_length) Attention mask on the sequence.
548
+
549
+ Returns:
550
+ hidden_state: torch.tensor(bs, seq_length, dim) Sequence of hidden states in the last (top) layer.
551
+ all_hidden_states: Tuple[torch.tensor(bs, seq_length, dim)]
552
+ Tuple of length n_layers with the hidden states from each layer.
553
+ Optional: only if output_hidden_states=True
554
+ all_attentions: Tuple[torch.tensor(bs, n_heads, seq_length, seq_length)]
555
+ Tuple of length n_layers with the attention weights from each layer
556
+ Optional: only if output_attentions=True
557
+ """
558
+ all_hidden_states = () if output_hidden_states else None
559
+ all_attentions = () if output_attentions else None
560
+
561
+ hidden_state = x
562
+ for i, layer_module in enumerate(self.layer):
563
+ if output_hidden_states:
564
+ all_hidden_states = all_hidden_states + (hidden_state,)
565
+
566
+ if self.gradient_checkpointing and self.training:
567
+ layer_outputs = self._gradient_checkpointing_func(
568
+ layer_module.__call__,
569
+ hidden_state,
570
+ attn_mask,
571
+ head_mask[i],
572
+ output_attentions,
573
+ )
574
+ else:
575
+ layer_outputs = layer_module(
576
+ hidden_state,
577
+ attn_mask,
578
+ head_mask[i],
579
+ output_attentions,
580
+ )
581
+
582
+ hidden_state = layer_outputs[-1]
583
+
584
+ if output_attentions:
585
+ if len(layer_outputs) != 2:
586
+ raise ValueError(f"The length of the layer_outputs should be 2, but it is {len(layer_outputs)}")
587
+
588
+ attentions = layer_outputs[0]
589
+ all_attentions = all_attentions + (attentions,)
590
+ else:
591
+ if len(layer_outputs) != 1:
592
+ raise ValueError(f"The length of the layer_outputs should be 1, but it is {len(layer_outputs)}")
593
+
594
+ # Add last layer
595
+ if output_hidden_states:
596
+ all_hidden_states = all_hidden_states + (hidden_state,)
597
+
598
+ if not return_dict:
599
+ return tuple(v for v in [hidden_state, all_hidden_states, all_attentions] if v is not None)
600
+ return BaseModelOutput(
601
+ last_hidden_state=hidden_state, hidden_states=all_hidden_states, attentions=all_attentions
602
+ )
603
+
604
+
605
+ # INTERFACE FOR ENCODER AND TASK SPECIFIC MODEL #
606
+ class DistilBertPreTrainedModel(PreTrainedModel):
607
+ """
608
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
609
+ models.
610
+ """
611
+
612
+ config_class = DistilBertConfig
613
+ load_tf_weights = None
614
+ base_model_prefix = "distilbert"
615
+ supports_gradient_checkpointing = True
616
+ _supports_flash_attn_2 = True
617
+
618
+ def _init_weights(self, module: nn.Module):
619
+ """Initialize the weights."""
620
+ if isinstance(module, nn.Linear):
621
+ # Slightly different from the TF version which uses truncated_normal for initialization
622
+ # cf https://github.com/pytorch/pytorch/pull/5617
623
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
624
+ if module.bias is not None:
625
+ module.bias.data.zero_()
626
+ elif isinstance(module, nn.Embedding):
627
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
628
+ if module.padding_idx is not None:
629
+ module.weight.data[module.padding_idx].zero_()
630
+ elif isinstance(module, nn.LayerNorm):
631
+ module.bias.data.zero_()
632
+ module.weight.data.fill_(1.0)
633
+ elif isinstance(module, Embeddings) and self.config.sinusoidal_pos_embds:
634
+ create_sinusoidal_embeddings(
635
+ self.config.max_position_embeddings, self.config.dim, module.position_embeddings.weight
636
+ )
637
+
638
+
639
+ DISTILBERT_START_DOCSTRING = r"""
640
+
641
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
642
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
643
+ etc.)
644
+
645
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
646
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
647
+ and behavior.
648
+
649
+ Parameters:
650
+ config ([`DistilBertConfig`]): Model configuration class with all the parameters of the model.
651
+ Initializing with a config file does not load the weights associated with the model, only the
652
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
653
+ """
654
+
655
+ DISTILBERT_INPUTS_DOCSTRING = r"""
656
+ Args:
657
+ input_ids (`torch.LongTensor` of shape `({0})`):
658
+ Indices of input sequence tokens in the vocabulary.
659
+
660
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
661
+ [`PreTrainedTokenizer.__call__`] for details.
662
+
663
+ [What are input IDs?](../glossary#input-ids)
664
+ attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
665
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
666
+
667
+ - 1 for tokens that are **not masked**,
668
+ - 0 for tokens that are **masked**.
669
+
670
+ [What are attention masks?](../glossary#attention-mask)
671
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
672
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
673
+
674
+ - 1 indicates the head is **not masked**,
675
+ - 0 indicates the head is **masked**.
676
+
677
+ inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
678
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
679
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
680
+ model's internal embedding lookup matrix.
681
+ output_attentions (`bool`, *optional*):
682
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
683
+ tensors for more detail.
684
+ output_hidden_states (`bool`, *optional*):
685
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
686
+ more detail.
687
+ return_dict (`bool`, *optional*):
688
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
689
+ """
690
+
691
+
692
+ @add_start_docstrings(
693
+ "The bare DistilBERT encoder/transformer outputting raw hidden-states without any specific head on top.",
694
+ DISTILBERT_START_DOCSTRING,
695
+ )
696
+ class DistilBertModel(DistilBertPreTrainedModel):
697
+ def __init__(self, config: PretrainedConfig):
698
+ super().__init__(config)
699
+
700
+ self.embeddings = Embeddings(config) # Embeddings
701
+ self.transformer = Transformer(config) # Encoder
702
+ self._use_flash_attention_2 = config._attn_implementation == "flash_attention_2"
703
+
704
+ # Initialize weights and apply final processing
705
+ self.post_init()
706
+
707
+ def get_position_embeddings(self) -> nn.Embedding:
708
+ """
709
+ Returns the position embeddings
710
+ """
711
+ return self.embeddings.position_embeddings
712
+
713
+ def resize_position_embeddings(self, new_num_position_embeddings: int):
714
+ """
715
+ Resizes position embeddings of the model if `new_num_position_embeddings != config.max_position_embeddings`.
716
+
717
+ Arguments:
718
+ new_num_position_embeddings (`int`):
719
+ The new number of position embeddings. If position embeddings are learned, increasing the size
720
+ will add newly initialized vectors at the end, whereas reducing the size will remove vectors from the
721
+ end. If position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the
722
+ size will add correct vectors at the end following the position encoding algorithm, whereas reducing
723
+ the size will remove vectors from the end.
724
+ """
725
+ num_position_embeds_diff = new_num_position_embeddings - self.config.max_position_embeddings
726
+
727
+ # no resizing needs to be done if the length stays the same
728
+ if num_position_embeds_diff == 0:
729
+ return
730
+
731
+ logger.info(f"Setting `config.max_position_embeddings={new_num_position_embeddings}`...")
732
+ self.config.max_position_embeddings = new_num_position_embeddings
733
+
734
+ old_position_embeddings_weight = self.embeddings.position_embeddings.weight.clone()
735
+
736
+ self.embeddings.position_embeddings = nn.Embedding(self.config.max_position_embeddings, self.config.dim)
737
+
738
+ if self.config.sinusoidal_pos_embds:
739
+ create_sinusoidal_embeddings(
740
+ n_pos=self.config.max_position_embeddings, dim=self.config.dim, out=self.embeddings.position_embeddings.weight
741
+ )
742
+ else:
743
+ with torch.no_grad():
744
+ if num_position_embeds_diff > 0:
745
+ self.embeddings.position_embeddings.weight[:-num_position_embeds_diff] = nn.Parameter(
746
+ old_position_embeddings_weight
747
+ )
748
+ else:
749
+ self.embeddings.position_embeddings.weight = nn.Parameter(
750
+ old_position_embeddings_weight[:num_position_embeds_diff]
751
+ )
752
+ # move position_embeddings to correct device
753
+ self.embeddings.position_embeddings.to(self.device)
754
+
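+ # Editor's note: a hedged usage sketch (not part of the upstream file). Growing the learned
+ # position table, e.g. from 512 to 1024 positions, keeps the existing rows and appends newly
+ # initialized ones; shrinking truncates from the end:
+ #
+ #     model = DistilBertModel.from_pretrained("distilbert-base-uncased")
+ #     model.resize_position_embeddings(1024)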
755
+ def get_input_embeddings(self) -> nn.Embedding:
756
+ return self.embeddings.word_embeddings
757
+
758
+ def set_input_embeddings(self, new_embeddings: nn.Embedding):
759
+ self.embeddings.word_embeddings = new_embeddings
760
+
761
+ def _prune_heads(self, heads_to_prune: Dict[int, List[int]]):
762
+ """
763
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
764
+ class PreTrainedModel
765
+ """
766
+ for layer, heads in heads_to_prune.items():
767
+ self.transformer.layer[layer].attention.prune_heads(heads)
768
+
769
+ @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format("batch_size, num_choices"))
770
+ @add_code_sample_docstrings(
771
+ checkpoint=_CHECKPOINT_FOR_DOC,
772
+ output_type=BaseModelOutput,
773
+ config_class=_CONFIG_FOR_DOC,
774
+ )
775
+ def forward(
776
+ self,
777
+ input_ids: Optional[torch.Tensor] = None,
778
+ attention_mask: Optional[torch.Tensor] = None,
779
+ head_mask: Optional[torch.Tensor] = None,
780
+ inputs_embeds: Optional[torch.Tensor] = None,
781
+ output_attentions: Optional[bool] = None,
782
+ output_hidden_states: Optional[bool] = None,
783
+ return_dict: Optional[bool] = None,
784
+ ) -> Union[BaseModelOutput, Tuple[torch.Tensor, ...]]:
785
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
786
+ output_hidden_states = (
787
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
788
+ )
789
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
790
+
791
+ if input_ids is not None and inputs_embeds is not None:
792
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
793
+ elif input_ids is not None:
794
+ self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
795
+ input_shape = input_ids.size()
796
+ elif inputs_embeds is not None:
797
+ input_shape = inputs_embeds.size()[:-1]
798
+ else:
799
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
800
+
801
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
802
+
803
+ # Prepare head mask if needed
804
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
805
+
806
+ embeddings = self.embeddings(input_ids, inputs_embeds) # (bs, seq_length, dim)
807
+
808
+ if self._use_flash_attention_2:
809
+ attention_mask = attention_mask if (attention_mask is not None and 0 in attention_mask) else None
810
+ else:
811
+ if attention_mask is None:
812
+ attention_mask = torch.ones(input_shape, device=device) # (bs, seq_length)
813
+
814
+ return self.transformer(
815
+ x=embeddings,
816
+ attn_mask=attention_mask,
817
+ head_mask=head_mask,
818
+ output_attentions=output_attentions,
819
+ output_hidden_states=output_hidden_states,
820
+ return_dict=return_dict,
821
+ )
822
+
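+ # Editor's note: a minimal, hypothetical end-to-end sketch (not part of the upstream file) for the
+ # bare encoder defined above; `AutoTokenizer` and the checkpoint name are the ones already referenced
+ # in this file's docstrings.
+ def _example_distilbert_model() -> torch.Tensor:
+     from transformers import AutoTokenizer
+
+     tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased")
+     model = DistilBertModel.from_pretrained("distilbert-base-uncased")
+     inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
+     outputs = model(**inputs)
+     return outputs.last_hidden_state  # (bs, seq_length, dim)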
823
+
824
+ @add_start_docstrings(
825
+ """DistilBert Model with a `masked language modeling` head on top.""",
826
+ DISTILBERT_START_DOCSTRING,
827
+ )
828
+ class DistilBertForMaskedLM(DistilBertPreTrainedModel):
829
+ _tied_weights_keys = ["vocab_projector.weight"]
830
+
831
+ def __init__(self, config: PretrainedConfig):
832
+ super().__init__(config)
833
+
834
+ self.activation = get_activation(config.activation)
835
+
836
+ self.distilbert = DistilBertModel(config)
837
+ self.vocab_transform = nn.Linear(config.dim, config.dim)
838
+ self.vocab_layer_norm = nn.LayerNorm(config.dim, eps=1e-12)
839
+ self.vocab_projector = nn.Linear(config.dim, config.vocab_size)
840
+
841
+ # Initialize weights and apply final processing
842
+ self.post_init()
843
+
844
+ self.mlm_loss_fct = nn.CrossEntropyLoss()
845
+
846
+ def get_position_embeddings(self) -> nn.Embedding:
847
+ """
848
+ Returns the position embeddings
849
+ """
850
+ return self.distilbert.get_position_embeddings()
851
+
852
+ def resize_position_embeddings(self, new_num_position_embeddings: int):
853
+ """
854
+ Resizes position embeddings of the model if `new_num_position_embeddings != config.max_position_embeddings`.
855
+
856
+ Arguments:
857
+ new_num_position_embeddings (`int`):
858
+ The new number of position embeddings. If position embeddings are learned, increasing the size
859
+ will add newly initialized vectors at the end, whereas reducing the size will remove vectors from the
860
+ end. If position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the
861
+ size will add correct vectors at the end following the position encoding algorithm, whereas reducing
862
+ the size will remove vectors from the end.
863
+ """
864
+ self.distilbert.resize_position_embeddings(new_num_position_embeddings)
865
+
866
+ def get_output_embeddings(self) -> nn.Module:
867
+ return self.vocab_projector
868
+
869
+ def set_output_embeddings(self, new_embeddings: nn.Module):
870
+ self.vocab_projector = new_embeddings
871
+
872
+ @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format("batch_size, num_choices"))
873
+ @add_code_sample_docstrings(
874
+ checkpoint=_CHECKPOINT_FOR_DOC,
875
+ output_type=MaskedLMOutput,
876
+ config_class=_CONFIG_FOR_DOC,
877
+ )
878
+ def forward(
879
+ self,
880
+ input_ids: Optional[torch.Tensor] = None,
881
+ attention_mask: Optional[torch.Tensor] = None,
882
+ head_mask: Optional[torch.Tensor] = None,
883
+ inputs_embeds: Optional[torch.Tensor] = None,
884
+ labels: Optional[torch.LongTensor] = None,
885
+ output_attentions: Optional[bool] = None,
886
+ output_hidden_states: Optional[bool] = None,
887
+ return_dict: Optional[bool] = None,
888
+ ) -> Union[MaskedLMOutput, Tuple[torch.Tensor, ...]]:
889
+ r"""
890
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
891
+ Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
892
+ config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
893
+ loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
894
+ """
895
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
896
+
897
+ dlbrt_output = self.distilbert(
898
+ input_ids=input_ids,
899
+ attention_mask=attention_mask,
900
+ head_mask=head_mask,
901
+ inputs_embeds=inputs_embeds,
902
+ output_attentions=output_attentions,
903
+ output_hidden_states=output_hidden_states,
904
+ return_dict=return_dict,
905
+ )
906
+ hidden_states = dlbrt_output[0] # (bs, seq_length, dim)
907
+ prediction_logits = self.vocab_transform(hidden_states) # (bs, seq_length, dim)
908
+ prediction_logits = self.activation(prediction_logits) # (bs, seq_length, dim)
909
+ prediction_logits = self.vocab_layer_norm(prediction_logits) # (bs, seq_length, dim)
910
+ prediction_logits = self.vocab_projector(prediction_logits) # (bs, seq_length, vocab_size)
911
+
912
+ mlm_loss = None
913
+ if labels is not None:
914
+ mlm_loss = self.mlm_loss_fct(prediction_logits.view(-1, prediction_logits.size(-1)), labels.view(-1))
915
+
916
+ if not return_dict:
917
+ output = (prediction_logits,) + dlbrt_output[1:]
918
+ return ((mlm_loss,) + output) if mlm_loss is not None else output
919
+
920
+ return MaskedLMOutput(
921
+ loss=mlm_loss,
922
+ logits=prediction_logits,
923
+ hidden_states=dlbrt_output.hidden_states,
924
+ attentions=dlbrt_output.attentions,
925
+ )
926
+
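+ # Editor's note: a minimal, hypothetical fill-mask sketch (not part of the upstream file) for the MLM
+ # head above: mask one token and read off the highest-scoring replacement.
+ def _example_fill_mask() -> str:
+     from transformers import AutoTokenizer
+
+     tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased")
+     model = DistilBertForMaskedLM.from_pretrained("distilbert-base-uncased")
+     inputs = tokenizer("The capital of France is [MASK].", return_tensors="pt")
+     logits = model(**inputs).logits  # (bs, seq_length, vocab_size)
+     mask_positions = (inputs["input_ids"] == tokenizer.mask_token_id).nonzero(as_tuple=True)[1]
+     predicted_id = logits[0, mask_positions].argmax(dim=-1)
+     return tokenizer.decode(predicted_id)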
927
+
928
+ @add_start_docstrings(
929
+ """
930
+ DistilBert Model transformer with a sequence classification/regression head on top (a linear layer on top of the
931
+ pooled output) e.g. for GLUE tasks.
932
+ """,
933
+ DISTILBERT_START_DOCSTRING,
934
+ )
935
+ class DistilBertForSequenceClassification(DistilBertPreTrainedModel):
936
+ def __init__(self, config: PretrainedConfig):
937
+ super().__init__(config)
938
+ self.num_labels = config.num_labels
939
+ self.config = config
940
+
941
+ self.distilbert = DistilBertModel(config)
942
+ self.pre_classifier = nn.Linear(config.dim, config.dim)
943
+ self.classifier = nn.Linear(config.dim, config.num_labels)
944
+ self.dropout = nn.Dropout(config.seq_classif_dropout)
945
+
946
+ # Initialize weights and apply final processing
947
+ self.post_init()
948
+
949
+ def get_position_embeddings(self) -> nn.Embedding:
950
+ """
951
+ Returns the position embeddings
952
+ """
953
+ return self.distilbert.get_position_embeddings()
954
+
955
+ def resize_position_embeddings(self, new_num_position_embeddings: int):
956
+ """
957
+ Resizes position embeddings of the model if `new_num_position_embeddings != config.max_position_embeddings`.
958
+
959
+ Arguments:
960
+ new_num_position_embeddings (`int`):
961
+ The new number of position embeddings. If position embeddings are learned, increasing the size
962
+ will add newly initialized vectors at the end, whereas reducing the size will remove vectors from the
963
+ end. If position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the
964
+ size will add correct vectors at the end following the position encoding algorithm, whereas reducing
965
+ the size will remove vectors from the end.
966
+ """
967
+ self.distilbert.resize_position_embeddings(new_num_position_embeddings)
968
+
969
+ @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
970
+ @add_code_sample_docstrings(
971
+ checkpoint=_CHECKPOINT_FOR_DOC,
972
+ output_type=SequenceClassifierOutput,
973
+ config_class=_CONFIG_FOR_DOC,
974
+ )
975
+ def forward(
976
+ self,
977
+ input_ids: Optional[torch.Tensor] = None,
978
+ attention_mask: Optional[torch.Tensor] = None,
979
+ head_mask: Optional[torch.Tensor] = None,
980
+ inputs_embeds: Optional[torch.Tensor] = None,
981
+ labels: Optional[torch.LongTensor] = None,
982
+ output_attentions: Optional[bool] = None,
983
+ output_hidden_states: Optional[bool] = None,
984
+ return_dict: Optional[bool] = None,
985
+ ) -> Union[SequenceClassifierOutput, Tuple[torch.Tensor, ...]]:
986
+ r"""
987
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
988
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
989
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
990
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
991
+ """
992
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
993
+
994
+ distilbert_output = self.distilbert(
995
+ input_ids=input_ids,
996
+ attention_mask=attention_mask,
997
+ head_mask=head_mask,
998
+ inputs_embeds=inputs_embeds,
999
+ output_attentions=output_attentions,
1000
+ output_hidden_states=output_hidden_states,
1001
+ return_dict=return_dict,
1002
+ )
1003
+ hidden_state = distilbert_output[0] # (bs, seq_len, dim)
1004
+ pooled_output = hidden_state[:, 0] # (bs, dim)
1005
+ pooled_output = self.pre_classifier(pooled_output) # (bs, dim)
1006
+ pooled_output = nn.ReLU()(pooled_output) # (bs, dim)
1007
+ pooled_output = self.dropout(pooled_output) # (bs, dim)
1008
+ logits = self.classifier(pooled_output) # (bs, num_labels)
1009
+
1010
+ loss = None
1011
+ if labels is not None:
1012
+ if self.config.problem_type is None:
1013
+ if self.num_labels == 1:
1014
+ self.config.problem_type = "regression"
1015
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
1016
+ self.config.problem_type = "single_label_classification"
1017
+ else:
1018
+ self.config.problem_type = "multi_label_classification"
1019
+
1020
+ if self.config.problem_type == "regression":
1021
+ loss_fct = MSELoss()
1022
+ if self.num_labels == 1:
1023
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
1024
+ else:
1025
+ loss = loss_fct(logits, labels)
1026
+ elif self.config.problem_type == "single_label_classification":
1027
+ loss_fct = CrossEntropyLoss()
1028
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
1029
+ elif self.config.problem_type == "multi_label_classification":
1030
+ loss_fct = BCEWithLogitsLoss()
1031
+ loss = loss_fct(logits, labels)
1032
+
1033
+ if not return_dict:
1034
+ output = (logits,) + distilbert_output[1:]
1035
+ return ((loss,) + output) if loss is not None else output
1036
+
1037
+ return SequenceClassifierOutput(
1038
+ loss=loss,
1039
+ logits=logits,
1040
+ hidden_states=distilbert_output.hidden_states,
1041
+ attentions=distilbert_output.attentions,
1042
+ )
1043
+
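+ # Editor's note: an illustrative, hypothetical summary (not part of the upstream file) of the
+ # `problem_type` dispatch used in the loss above: a single label -> regression (MSE), integer labels
+ # with more than one class -> single-label classification (cross-entropy), anything else (e.g. float
+ # multi-hot labels) -> multi-label classification (BCE with logits).
+ def _example_problem_type(num_labels: int, labels: torch.Tensor) -> str:
+     if num_labels == 1:
+         return "regression"
+     if labels.dtype in (torch.long, torch.int):
+         return "single_label_classification"
+     return "multi_label_classification"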
1044
+
1045
+ @add_start_docstrings(
1046
+ """
1047
+ DistilBert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a
1048
+ linear layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
1049
+ """,
1050
+ DISTILBERT_START_DOCSTRING,
1051
+ )
1052
+ class DistilBertForQuestionAnswering(DistilBertPreTrainedModel):
1053
+ def __init__(self, config: PretrainedConfig):
1054
+ super().__init__(config)
1055
+
1056
+ self.distilbert = DistilBertModel(config)
1057
+ self.qa_outputs = nn.Linear(config.dim, config.num_labels)
1058
+ if config.num_labels != 2:
1059
+ raise ValueError(f"config.num_labels should be 2, but it is {config.num_labels}")
1060
+
1061
+ self.dropout = nn.Dropout(config.qa_dropout)
1062
+
1063
+ # Initialize weights and apply final processing
1064
+ self.post_init()
1065
+
1066
+ def get_position_embeddings(self) -> nn.Embedding:
1067
+ """
1068
+ Returns the position embeddings
1069
+ """
1070
+ return self.distilbert.get_position_embeddings()
1071
+
1072
+ def resize_position_embeddings(self, new_num_position_embeddings: int):
1073
+ """
1074
+ Resizes position embeddings of the model if `new_num_position_embeddings != config.max_position_embeddings`.
1075
+
1076
+ Arguments:
1077
+ new_num_position_embeddings (`int`):
1078
+ The new number of position embeddings. If position embeddings are learned, increasing the size
1079
+ will add newly initialized vectors at the end, whereas reducing the size will remove vectors from the
1080
+ end. If position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the
1081
+ size will add correct vectors at the end following the position encoding algorithm, whereas reducing
1082
+ the size will remove vectors from the end.
1083
+ """
1084
+ self.distilbert.resize_position_embeddings(new_num_position_embeddings)
1085
+
1086
+ @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format("batch_size, num_choices"))
1087
+ @add_code_sample_docstrings(
1088
+ checkpoint=_CHECKPOINT_FOR_DOC,
1089
+ output_type=QuestionAnsweringModelOutput,
1090
+ config_class=_CONFIG_FOR_DOC,
1091
+ )
1092
+ def forward(
1093
+ self,
1094
+ input_ids: Optional[torch.Tensor] = None,
1095
+ attention_mask: Optional[torch.Tensor] = None,
1096
+ head_mask: Optional[torch.Tensor] = None,
1097
+ inputs_embeds: Optional[torch.Tensor] = None,
1098
+ start_positions: Optional[torch.Tensor] = None,
1099
+ end_positions: Optional[torch.Tensor] = None,
1100
+ output_attentions: Optional[bool] = None,
1101
+ output_hidden_states: Optional[bool] = None,
1102
+ return_dict: Optional[bool] = None,
1103
+ ) -> Union[QuestionAnsweringModelOutput, Tuple[torch.Tensor, ...]]:
1104
+ r"""
1105
+ start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1106
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
1107
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1108
+ are not taken into account for computing the loss.
1109
+ end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1110
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
1111
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1112
+ are not taken into account for computing the loss.
1113
+ """
1114
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1115
+
1116
+ distilbert_output = self.distilbert(
1117
+ input_ids=input_ids,
1118
+ attention_mask=attention_mask,
1119
+ head_mask=head_mask,
1120
+ inputs_embeds=inputs_embeds,
1121
+ output_attentions=output_attentions,
1122
+ output_hidden_states=output_hidden_states,
1123
+ return_dict=return_dict,
1124
+ )
1125
+ hidden_states = distilbert_output[0] # (bs, max_query_len, dim)
1126
+
1127
+ hidden_states = self.dropout(hidden_states) # (bs, max_query_len, dim)
1128
+ logits = self.qa_outputs(hidden_states) # (bs, max_query_len, 2)
1129
+ start_logits, end_logits = logits.split(1, dim=-1)
1130
+ start_logits = start_logits.squeeze(-1).contiguous() # (bs, max_query_len)
1131
+ end_logits = end_logits.squeeze(-1).contiguous() # (bs, max_query_len)
1132
+
1133
+ total_loss = None
1134
+ if start_positions is not None and end_positions is not None:
1135
+ # If we are on multi-GPU, splitting adds an extra dimension; remove it
1136
+ if len(start_positions.size()) > 1:
1137
+ start_positions = start_positions.squeeze(-1)
1138
+ if len(end_positions.size()) > 1:
1139
+ end_positions = end_positions.squeeze(-1)
1140
+ # Sometimes the start/end positions are outside our model inputs; we ignore these terms
1141
+ ignored_index = start_logits.size(1)
1142
+ start_positions = start_positions.clamp(0, ignored_index)
1143
+ end_positions = end_positions.clamp(0, ignored_index)
1144
+
1145
+ loss_fct = nn.CrossEntropyLoss(ignore_index=ignored_index)
1146
+ start_loss = loss_fct(start_logits, start_positions)
1147
+ end_loss = loss_fct(end_logits, end_positions)
1148
+ total_loss = (start_loss + end_loss) / 2
1149
+
1150
+ if not return_dict:
1151
+ output = (start_logits, end_logits) + distilbert_output[1:]
1152
+ return ((total_loss,) + output) if total_loss is not None else output
1153
+
1154
+ return QuestionAnsweringModelOutput(
1155
+ loss=total_loss,
1156
+ start_logits=start_logits,
1157
+ end_logits=end_logits,
1158
+ hidden_states=distilbert_output.hidden_states,
1159
+ attentions=distilbert_output.attentions,
1160
+ )
1161
+
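+ # Editor's note: a minimal, hypothetical post-processing sketch (not part of the upstream file)
+ # showing how the start/end logits produced above are usually turned back into an answer string.
+ # Greedy argmax decoding is used for brevity; real pipelines score all valid (start, end) pairs.
+ def _example_extract_answer(tokenizer, input_ids: torch.Tensor, start_logits: torch.Tensor, end_logits: torch.Tensor) -> str:
+     start = int(start_logits.argmax(dim=-1)[0])
+     end = int(end_logits.argmax(dim=-1)[0])
+     answer_ids = input_ids[0, start : end + 1]
+     return tokenizer.decode(answer_ids, skip_special_tokens=True)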
1162
+
1163
+ @add_start_docstrings(
1164
+ """
1165
+ DistilBert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g.
1166
+ for Named-Entity-Recognition (NER) tasks.
1167
+ """,
1168
+ DISTILBERT_START_DOCSTRING,
1169
+ )
1170
+ class DistilBertForTokenClassification(DistilBertPreTrainedModel):
1171
+ def __init__(self, config: PretrainedConfig):
1172
+ super().__init__(config)
1173
+ self.num_labels = config.num_labels
1174
+
1175
+ self.distilbert = DistilBertModel(config)
1176
+ self.dropout = nn.Dropout(config.dropout)
1177
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
1178
+
1179
+ # Initialize weights and apply final processing
1180
+ self.post_init()
1181
+
1182
+ def get_position_embeddings(self) -> nn.Embedding:
1183
+ """
1184
+ Returns the position embeddings
1185
+ """
1186
+ return self.distilbert.get_position_embeddings()
1187
+
1188
+ def resize_position_embeddings(self, new_num_position_embeddings: int):
1189
+ """
1190
+ Resizes position embeddings of the model if `new_num_position_embeddings != config.max_position_embeddings`.
1191
+
1192
+ Arguments:
1193
+ new_num_position_embeddings (`int`):
1194
+ The new number of position embeddings. If position embeddings are learned, increasing the size
1195
+ will add newly initialized vectors at the end, whereas reducing the size will remove vectors from the
1196
+ end. If position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the
1197
+ size will add correct vectors at the end following the position encoding algorithm, whereas reducing
1198
+ the size will remove vectors from the end.
1199
+ """
1200
+ self.distilbert.resize_position_embeddings(new_num_position_embeddings)
1201
+
1202
+ @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING)
1203
+ @add_code_sample_docstrings(
1204
+ checkpoint=_CHECKPOINT_FOR_DOC,
1205
+ output_type=TokenClassifierOutput,
1206
+ config_class=_CONFIG_FOR_DOC,
1207
+ )
1208
+ def forward(
1209
+ self,
1210
+ input_ids: Optional[torch.Tensor] = None,
1211
+ attention_mask: Optional[torch.Tensor] = None,
1212
+ head_mask: Optional[torch.Tensor] = None,
1213
+ inputs_embeds: Optional[torch.Tensor] = None,
1214
+ labels: Optional[torch.LongTensor] = None,
1215
+ output_attentions: Optional[bool] = None,
1216
+ output_hidden_states: Optional[bool] = None,
1217
+ return_dict: Optional[bool] = None,
1218
+ ) -> Union[TokenClassifierOutput, Tuple[torch.Tensor, ...]]:
1219
+ r"""
1220
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1221
+ Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
1222
+ """
1223
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1224
+
1225
+ outputs = self.distilbert(
1226
+ input_ids,
1227
+ attention_mask=attention_mask,
1228
+ head_mask=head_mask,
1229
+ inputs_embeds=inputs_embeds,
1230
+ output_attentions=output_attentions,
1231
+ output_hidden_states=output_hidden_states,
1232
+ return_dict=return_dict,
1233
+ )
1234
+
1235
+ sequence_output = outputs[0]
1236
+
1237
+ sequence_output = self.dropout(sequence_output)
1238
+ logits = self.classifier(sequence_output)
1239
+
1240
+ loss = None
1241
+ if labels is not None:
1242
+ loss_fct = CrossEntropyLoss()
1243
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
1244
+
1245
+ if not return_dict:
1246
+ output = (logits,) + outputs[1:]
1247
+ return ((loss,) + output) if loss is not None else output
1248
+
1249
+ return TokenClassifierOutput(
1250
+ loss=loss,
1251
+ logits=logits,
1252
+ hidden_states=outputs.hidden_states,
1253
+ attentions=outputs.attentions,
1254
+ )
1255
+
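+ # Editor's note: a hypothetical helper (not part of the upstream file) turning the per-token logits
+ # above into label ids while ignoring positions labelled -100, the default `ignore_index` of the
+ # cross-entropy loss used by this head.
+ def _example_token_predictions(logits: torch.Tensor, labels: torch.Tensor) -> torch.Tensor:
+     predictions = logits.argmax(dim=-1)  # (bs, seq_length)
+     return torch.where(labels == -100, torch.full_like(predictions, -100), predictions)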
1256
+
1257
+ @add_start_docstrings(
1258
+ """
1259
+ DistilBert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and
1260
+ a softmax) e.g. for RocStories/SWAG tasks.
1261
+ """,
1262
+ DISTILBERT_START_DOCSTRING,
1263
+ )
1264
+ class DistilBertForMultipleChoice(DistilBertPreTrainedModel):
1265
+ def __init__(self, config: PretrainedConfig):
1266
+ super().__init__(config)
1267
+
1268
+ self.distilbert = DistilBertModel(config)
1269
+ self.pre_classifier = nn.Linear(config.dim, config.dim)
1270
+ self.classifier = nn.Linear(config.dim, 1)
1271
+ self.dropout = nn.Dropout(config.seq_classif_dropout)
1272
+
1273
+ # Initialize weights and apply final processing
1274
+ self.post_init()
1275
+
1276
+ def get_position_embeddings(self) -> nn.Embedding:
1277
+ """
1278
+ Returns the position embeddings
1279
+ """
1280
+ return self.distilbert.get_position_embeddings()
1281
+
1282
+ def resize_position_embeddings(self, new_num_position_embeddings: int):
1283
+ """
1284
+ Resizes position embeddings of the model if `new_num_position_embeddings != config.max_position_embeddings`.
1285
+
1286
+ Arguments:
1287
+ new_num_position_embeddings (`int`)
1288
+ The number of new position embeddings. If position embeddings are learned, increasing the size will add
1289
+ newly initialized vectors at the end, whereas reducing the size will remove vectors from the end. If
1290
+ position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the size will
1291
+ add correct vectors at the end following the position encoding algorithm, whereas reducing the size
1292
+ will remove vectors from the end.
1293
+ """
1294
+ self.distilbert.resize_position_embeddings(new_num_position_embeddings)
1295
+
1296
+ @add_start_docstrings_to_model_forward(
1297
+ DISTILBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")
1298
+ )
1299
+ @replace_return_docstrings(output_type=MultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC)
1300
+ def forward(
1301
+ self,
1302
+ input_ids: Optional[torch.Tensor] = None,
1303
+ attention_mask: Optional[torch.Tensor] = None,
1304
+ head_mask: Optional[torch.Tensor] = None,
1305
+ inputs_embeds: Optional[torch.Tensor] = None,
1306
+ labels: Optional[torch.LongTensor] = None,
1307
+ output_attentions: Optional[bool] = None,
1308
+ output_hidden_states: Optional[bool] = None,
1309
+ return_dict: Optional[bool] = None,
1310
+ ) -> Union[MultipleChoiceModelOutput, Tuple[torch.Tensor, ...]]:
1311
+ r"""
1312
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1313
+ Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
1314
+ num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
1315
+ `input_ids` above)
1316
+
1317
+ Returns:
1318
+
1319
+ Examples:
1320
+
1321
+ ```python
1322
+ >>> from transformers import AutoTokenizer, DistilBertForMultipleChoice
1323
+ >>> import torch
1324
+
1325
+ >>> tokenizer = AutoTokenizer.from_pretrained("distilbert-base-cased")
1326
+ >>> model = DistilBertForMultipleChoice.from_pretrained("distilbert-base-cased")
1327
+
1328
+ >>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced."
1329
+ >>> choice0 = "It is eaten with a fork and a knife."
1330
+ >>> choice1 = "It is eaten while held in the hand."
1331
+ >>> labels = torch.tensor(0).unsqueeze(0) # choice0 is correct (according to Wikipedia ;)), batch size 1
1332
+
1333
+ >>> encoding = tokenizer([[prompt, choice0], [prompt, choice1]], return_tensors="pt", padding=True)
1334
+ >>> outputs = model(**{k: v.unsqueeze(0) for k, v in encoding.items()}, labels=labels) # batch size is 1
1335
+
1336
+ >>> # the linear classifier still needs to be trained
1337
+ >>> loss = outputs.loss
1338
+ >>> logits = outputs.logits
1339
+ ```"""
1340
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1341
+ num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
1342
+
1343
+ input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
1344
+ attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
1345
+ inputs_embeds = (
1346
+ inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
1347
+ if inputs_embeds is not None
1348
+ else None
1349
+ )
1350
+
1351
+ outputs = self.distilbert(
1352
+ input_ids,
1353
+ attention_mask=attention_mask,
1354
+ head_mask=head_mask,
1355
+ inputs_embeds=inputs_embeds,
1356
+ output_attentions=output_attentions,
1357
+ output_hidden_states=output_hidden_states,
1358
+ return_dict=return_dict,
1359
+ )
1360
+
1361
+ hidden_state = outputs[0] # (bs * num_choices, seq_len, dim)
1362
+ pooled_output = hidden_state[:, 0] # (bs * num_choices, dim)
1363
+ pooled_output = self.pre_classifier(pooled_output) # (bs * num_choices, dim)
1364
+ pooled_output = nn.ReLU()(pooled_output) # (bs * num_choices, dim)
1365
+ pooled_output = self.dropout(pooled_output) # (bs * num_choices, dim)
1366
+ logits = self.classifier(pooled_output) # (bs * num_choices, 1)
1367
+
1368
+ reshaped_logits = logits.view(-1, num_choices) # (bs, num_choices)
1369
+
1370
+ loss = None
1371
+ if labels is not None:
1372
+ loss_fct = CrossEntropyLoss()
1373
+ loss = loss_fct(reshaped_logits, labels)
1374
+
1375
+ if not return_dict:
1376
+ output = (reshaped_logits,) + outputs[1:]
1377
+ return ((loss,) + output) if loss is not None else output
1378
+
1379
+ return MultipleChoiceModelOutput(
1380
+ loss=loss,
1381
+ logits=reshaped_logits,
1382
+ hidden_states=outputs.hidden_states,
1383
+ attentions=outputs.attentions,
1384
+ )
llmeval-env/lib/python3.10/site-packages/transformers/models/distilbert/modeling_flax_distilbert.py ADDED
@@ -0,0 +1,895 @@
1
+ # coding=utf-8
2
+ # Copyright 2019-present, the HuggingFace Inc. team, The Google AI Language Team and Facebook, Inc.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ import math
17
+ from typing import Callable, Optional, Tuple
18
+
19
+ import flax.linen as nn
20
+ import jax
21
+ import jax.numpy as jnp
22
+ import numpy as np
23
+ from flax.core.frozen_dict import FrozenDict, freeze, unfreeze
24
+ from flax.traverse_util import flatten_dict, unflatten_dict
25
+ from jax import lax
26
+
27
+ from ...modeling_flax_outputs import (
28
+ FlaxBaseModelOutput,
29
+ FlaxMaskedLMOutput,
30
+ FlaxMultipleChoiceModelOutput,
31
+ FlaxQuestionAnsweringModelOutput,
32
+ FlaxSequenceClassifierOutput,
33
+ FlaxTokenClassifierOutput,
34
+ )
35
+ from ...modeling_flax_utils import ACT2FN, FlaxPreTrainedModel, append_call_sample_docstring, overwrite_call_docstring
36
+ from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging
37
+ from .configuration_distilbert import DistilBertConfig
38
+
39
+
40
+ logger = logging.get_logger(__name__)
41
+
42
+ _CHECKPOINT_FOR_DOC = "distilbert-base-uncased"
43
+ _CONFIG_FOR_DOC = "DistilBertConfig"
44
+
45
+
46
+ FLAX_DISTILBERT_START_DOCSTRING = r"""
47
+
48
+ This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the
49
+ library implements for all its models (such as downloading, saving and converting weights from PyTorch models)
50
+
51
+ This model is also a
52
+ [flax.linen.Module](https://flax.readthedocs.io/en/latest/api_reference/flax.linen/module.html) subclass. Use it as
53
+ a regular Flax linen Module and refer to the Flax documentation for all matters related to general usage and
54
+ behavior.
55
+
56
+ Finally, this model supports inherent JAX features such as:
57
+
58
+ - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit)
59
+ - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation)
60
+ - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap)
61
+ - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap)
62
+
63
+ Parameters:
64
+ config ([`DistilBertConfig`]): Model configuration class with all the parameters of the model.
65
+ Initializing with a config file does not load the weights associated with the model, only the
66
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
67
+ """
68
+
69
+ DISTILBERT_INPUTS_DOCSTRING = r"""
70
+ Args:
71
+ input_ids (`numpy.ndarray` of shape `({0})`):
72
+ Indices of input sequence tokens in the vocabulary.
73
+
74
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
75
+ [`PreTrainedTokenizer.__call__`] for details.
76
+
77
+ [What are input IDs?](../glossary#input-ids)
78
+ attention_mask (`numpy.ndarray` of shape `({0})`, *optional*):
79
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
80
+
81
+ - 1 for tokens that are **not masked**,
82
+ - 0 for tokens that are **masked**.
83
+
84
+ [What are attention masks?](../glossary#attention-mask)
85
+ output_attentions (`bool`, *optional*):
86
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
87
+ tensors for more detail.
88
+ output_hidden_states (`bool`, *optional*):
89
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
90
+ more detail.
91
+ return_dict (`bool`, *optional*):
92
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
93
+ """
94
+
95
+
96
+ def get_angles(pos, i, d_model):
97
+ angle_rates = 1 / np.power(10000, (2 * (i // 2)) / np.float32(d_model))
98
+ return pos * angle_rates
99
+
100
+
101
+ def positional_encoding(position, d_model):
102
+ # create the sinusoidal pattern for the positional encoding
103
+ angle_rads = get_angles(np.arange(position)[:, np.newaxis], np.arange(d_model)[np.newaxis, :], d_model)
104
+
105
+ # apply sin to even indices in the array; 2i
106
+ angle_rads[:, 0::2] = np.sin(angle_rads[:, 0::2])
107
+
108
+ # apply cos to odd indices in the array; 2i+1
109
+ angle_rads[:, 1::2] = np.cos(angle_rads[:, 1::2])
110
+
111
+ pos_encoding = angle_rads[np.newaxis, ...]
112
+
113
+ return jnp.array(pos_encoding)
114
+
115
+
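The table built by `positional_encoding` above follows the standard sinusoidal scheme: even feature columns carry sines, odd columns cosines, and the result gains a leading batch axis. A minimal editorial sketch of what that means in practice, assuming `get_angles`/`positional_encoding` from this module are in scope:

```python
import numpy as np

# Build a tiny table: 4 positions, 6 features -> shape (1, 4, 6)
table = positional_encoding(position=4, d_model=6)
assert table.shape == (1, 4, 6)

# Position 0 encodes to sin(0) = 0 in even columns and cos(0) = 1 in odd columns
assert np.allclose(np.asarray(table)[0, 0], [0.0, 1.0, 0.0, 1.0, 0.0, 1.0])
```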
116
+ class FlaxEmbeddings(nn.Module):
117
+ """Construct the embeddings from word, position and token_type embeddings."""
118
+
119
+ config: DistilBertConfig
120
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
121
+
122
+ def setup(self):
123
+ self.word_embeddings = nn.Embed(
124
+ self.config.vocab_size,
125
+ self.config.dim,
126
+ embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
127
+ )
128
+ if not self.config.sinusoidal_pos_embds:
129
+ self.position_embeddings = nn.Embed(
130
+ self.config.max_position_embeddings,
131
+ self.config.dim,
132
+ embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
133
+ )
134
+ else:
135
+ self.pos_encoding = positional_encoding(self.config.max_position_embeddings, self.config.dim)
136
+ self.LayerNorm = nn.LayerNorm(epsilon=1e-12, dtype=self.dtype)
137
+ self.dropout = nn.Dropout(rate=self.config.dropout)
138
+
139
+ def __call__(self, input_ids, deterministic: bool = True):
140
+ # Embed
141
+ batch_size, seq_length = input_ids.shape
142
+ inputs_embeds = self.word_embeddings(input_ids.astype("i4"))
143
+ if not self.config.sinusoidal_pos_embds:
144
+ position_ids = jnp.arange(seq_length).astype("i4")
145
+ position_ids = jnp.broadcast_to(position_ids, shape=(batch_size, seq_length))
146
+ position_embeds = self.position_embeddings(position_ids.astype("i4"))
147
+ else:
148
+ position_embeds = self.pos_encoding[:, :seq_length, :]
149
+ # explicitly cast the positions here, since self.embed_positions are not registered as parameters
150
+ position_embeds = position_embeds.astype(inputs_embeds.dtype)
151
+
152
+ # Sum all embeddings
153
+ hidden_states = inputs_embeds + position_embeds
154
+
155
+ # Layer Norm
156
+ hidden_states = self.LayerNorm(hidden_states)
157
+ hidden_states = self.dropout(hidden_states, deterministic=deterministic)
158
+ return hidden_states
159
+
160
+
161
+ class FlaxMultiHeadSelfAttention(nn.Module):
162
+ config: DistilBertConfig
163
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
164
+
165
+ def setup(self):
166
+ self.n_heads = self.config.n_heads
167
+ self.dim = self.config.dim
168
+ self.dropout = nn.Dropout(rate=self.config.attention_dropout)
169
+
170
+ if not (self.dim % self.n_heads == 0):
171
+ raise ValueError(f"Hidden size {self.dim} not dividable by number of heads {self.n_heads}")
172
+
173
+ self.q_lin = nn.Dense(
174
+ self.dim,
175
+ dtype=self.dtype,
176
+ kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
177
+ )
178
+ self.k_lin = nn.Dense(
179
+ self.dim,
180
+ dtype=self.dtype,
181
+ kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
182
+ )
183
+ self.v_lin = nn.Dense(
184
+ self.dim,
185
+ dtype=self.dtype,
186
+ kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
187
+ )
188
+ self.out_lin = nn.Dense(
189
+ self.dim,
190
+ dtype=self.dtype,
191
+ kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
192
+ )
193
+
194
+ def __call__(
195
+ self,
196
+ query,
197
+ key,
198
+ value,
199
+ mask,
200
+ deterministic: bool = True,
201
+ output_attentions: bool = False,
202
+ ):
203
+ bs, q_len, dim = query.shape
204
+ k_len = key.shape[1]
205
+ # assert dim == self.dim, f'Dimensions do not match: {dim} input vs {self.dim} configured'
206
+ # assert key.size() == value.size()
207
+
208
+ dim_per_head = self.dim // self.n_heads
209
+
210
+ mask_reshp = (bs, 1, 1, k_len)
211
+
212
+ def shape(x):
213
+ """separate heads"""
214
+ return x.reshape(bs, -1, self.n_heads, dim_per_head).transpose(0, 2, 1, 3)
215
+
216
+ def unshape(x):
217
+ """group heads"""
218
+ return x.transpose(0, 2, 1, 3).reshape(bs, -1, self.n_heads * dim_per_head)
219
+
220
+ q = shape(self.q_lin(query)) # (bs, n_heads, q_len, dim_per_head)
221
+ k = shape(self.k_lin(key)) # (bs, n_heads, k_len, dim_per_head)
222
+ v = shape(self.v_lin(value)) # (bs, n_heads, k_len, dim_per_head)
223
+
224
+ q = q / math.sqrt(dim_per_head) # (bs, n_heads, q_len, dim_per_head)
225
+ scores = jnp.matmul(q, k.transpose(0, 1, 3, 2)) # (bs, n_heads, q_len, k_len)
226
+ mask = jnp.reshape(mask, mask_reshp)
227
+
228
+ mask = mask.astype(scores.dtype)
229
+ scores = scores - 1e30 * (1.0 - mask)
230
+
231
+ weights = nn.softmax(scores, axis=-1) # (bs, n_heads, q_len, k_len)
232
+ weights = self.dropout(weights, deterministic=deterministic)
233
+
234
+ context = jnp.matmul(weights, v) # (bs, n_heads, q_len, dim_per_head)
235
+ context = unshape(context) # (bs, q_len, dim)
236
+ context = self.out_lin(context) # (bs, q_len, dim)
237
+
238
+ if output_attentions:
239
+ return (context, weights)
240
+ else:
241
+ return (context,)
242
+
243
+
244
+ class FlaxFFN(nn.Module):
245
+ config: DistilBertConfig
246
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
247
+
248
+ def setup(self):
249
+ self.dropout = nn.Dropout(rate=self.config.dropout)
250
+ self.chunk_size_feed_forward = self.config.chunk_size_feed_forward
251
+ self.seq_len_dim = 1
252
+ self.lin1 = nn.Dense(
253
+ self.config.hidden_dim,
254
+ dtype=self.dtype,
255
+ kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
256
+ )
257
+ self.lin2 = nn.Dense(
258
+ self.config.dim,
259
+ dtype=self.dtype,
260
+ kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
261
+ )
262
+
263
+ self.activation = ACT2FN[self.config.activation]
264
+
265
+ def __call__(self, hidden_states, deterministic: bool = True):
266
+ hidden_states = self.lin1(hidden_states)
267
+ hidden_states = self.activation(hidden_states)
268
+ hidden_states = self.lin2(hidden_states)
269
+ hidden_states = self.dropout(hidden_states, deterministic=deterministic)
270
+ return hidden_states
271
+
272
+
273
+ class FlaxTransformerBlock(nn.Module):
274
+ config: DistilBertConfig
275
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
276
+
277
+ def setup(self):
278
+ assert (
279
+ self.config.dim % self.config.n_heads == 0
280
+ ), f"Hidden size {self.config.dim} not dividable by number of heads {self.config.n_heads}"
281
+
282
+ self.attention = FlaxMultiHeadSelfAttention(self.config, dtype=self.dtype)
283
+ self.sa_layer_norm = nn.LayerNorm(epsilon=1e-12, dtype=self.dtype)
284
+
285
+ self.ffn = FlaxFFN(self.config, dtype=self.dtype)
286
+ self.output_layer_norm = nn.LayerNorm(epsilon=1e-12, dtype=self.dtype)
287
+
288
+ def __call__(
289
+ self,
290
+ hidden_states,
291
+ attn_mask,
292
+ output_attentions: bool = False,
293
+ deterministic: bool = True,
294
+ ):
295
+ # Self-Attention
296
+ sa_output = self.attention(
297
+ query=hidden_states,
298
+ key=hidden_states,
299
+ value=hidden_states,
300
+ mask=attn_mask,
301
+ output_attentions=output_attentions,
302
+ deterministic=deterministic,
303
+ )
304
+ if output_attentions:
305
+ sa_output, sa_weights = sa_output
306
+ else:
307
+ assert type(sa_output) == tuple
308
+ sa_output = sa_output[0]
309
+ sa_output = self.sa_layer_norm(sa_output + hidden_states)
310
+
311
+ # Feed Forward Network
312
+ ffn_output = self.ffn(sa_output, deterministic=deterministic)
313
+ ffn_output = self.output_layer_norm(ffn_output + sa_output)
314
+ output = (ffn_output,)
315
+ if output_attentions:
316
+ output = (sa_weights,) + output
317
+ return output
318
+
319
+
320
+ class FlaxTransformer(nn.Module):
321
+ config: DistilBertConfig
322
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
323
+
324
+ def setup(self):
325
+ self.layers = [
326
+ FlaxTransformerBlock(self.config, name=str(i), dtype=self.dtype) for i in range(self.config.n_layers)
327
+ ]
328
+
329
+ def __call__(
330
+ self,
331
+ hidden_states,
332
+ attention_mask,
333
+ output_attentions: bool = False,
334
+ output_hidden_states: bool = False,
335
+ deterministic: bool = True,
336
+ return_dict: bool = False,
337
+ ):
338
+ all_hidden_states = () if output_hidden_states else None
339
+ all_attentions = () if output_attentions else None
340
+
341
+ for layer_module in self.layers:
342
+ if output_hidden_states:
343
+ all_hidden_states = all_hidden_states + (hidden_states,)
344
+
345
+ layer_outputs = layer_module(
346
+ hidden_states=hidden_states,
347
+ attn_mask=attention_mask,
348
+ output_attentions=output_attentions,
349
+ deterministic=deterministic,
350
+ )
351
+ hidden_states = layer_outputs[-1]
352
+
353
+ if output_attentions:
354
+ assert len(layer_outputs) == 2
355
+ attentions = layer_outputs[0]
356
+ all_attentions = all_attentions + (attentions,)
357
+ else:
358
+ assert len(layer_outputs) == 1
359
+
360
+ # Add last layer
361
+ if output_hidden_states:
362
+ all_hidden_states = all_hidden_states + (hidden_states,)
363
+
364
+ if not return_dict:
365
+ return tuple(v for v in [hidden_states, all_attentions, all_hidden_states] if v is not None)
366
+ return FlaxBaseModelOutput(
367
+ last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions
368
+ )
369
+
370
+
371
+ class FlaxTransformerEncoder(nn.Module):
372
+ config: DistilBertConfig
373
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
374
+
375
+ def setup(self):
376
+ self.layer = FlaxTransformer(self.config, dtype=self.dtype)
377
+
378
+ def __call__(
379
+ self,
380
+ hidden_states,
381
+ attention_mask,
382
+ output_attentions: bool = False,
383
+ output_hidden_states: bool = False,
384
+ deterministic: bool = True,
385
+ return_dict: bool = False,
386
+ ):
387
+ return self.layer(
388
+ hidden_states=hidden_states,
389
+ attention_mask=attention_mask,
390
+ output_attentions=output_attentions,
391
+ output_hidden_states=output_hidden_states,
392
+ deterministic=deterministic,
393
+ return_dict=return_dict,
394
+ )
395
+
396
+
397
+ class FlaxDistilBertLMDecoder(nn.Module):
398
+ config: DistilBertConfig
399
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
400
+ bias_init: Callable[..., np.ndarray] = jax.nn.initializers.zeros
401
+
402
+ def setup(self):
403
+ self.bias = self.param("bias", self.bias_init, (self.config.vocab_size,))
404
+
405
+ def __call__(self, inputs, kernel):
406
+ inputs = jnp.asarray(inputs, self.dtype)
407
+ kernel = jnp.asarray(kernel, self.dtype)
408
+ y = lax.dot_general(inputs, kernel, (((inputs.ndim - 1,), (0,)), ((), ())))
409
+ bias = jnp.asarray(self.bias, self.dtype)
410
+ y = y + bias
411
+ return y
412
+
413
+
414
+ class FlaxDistilBertPreTrainedModel(FlaxPreTrainedModel):
415
+ """
416
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
417
+ models.
418
+ """
419
+
420
+ config_class = DistilBertConfig
421
+ base_model_prefix = "distilbert"
422
+ module_class: nn.Module = None
423
+
424
+ def __init__(
425
+ self,
426
+ config: DistilBertConfig,
427
+ input_shape: Tuple = (1, 1),
428
+ seed: int = 0,
429
+ dtype: jnp.dtype = jnp.float32,
430
+ _do_init: bool = True,
431
+ **kwargs,
432
+ ):
433
+ module = self.module_class(config=config, dtype=dtype, **kwargs)
434
+ super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)
435
+
436
+ def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
437
+ # init input tensors
438
+ input_ids = jnp.zeros(input_shape, dtype="i4")
439
+ attention_mask = jnp.ones_like(input_ids)
440
+
441
+ params_rng, dropout_rng = jax.random.split(rng)
442
+ rngs = {"params": params_rng, "dropout": dropout_rng}
443
+
444
+ random_params = self.module.init(rngs, input_ids, attention_mask, return_dict=False)["params"]
445
+
446
+ if params is not None:
447
+ random_params = flatten_dict(unfreeze(random_params))
448
+ params = flatten_dict(unfreeze(params))
449
+ for missing_key in self._missing_keys:
450
+ params[missing_key] = random_params[missing_key]
451
+ self._missing_keys = set()
452
+ return freeze(unflatten_dict(params))
453
+ else:
454
+ return random_params
455
+
456
+ @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
457
+ def __call__(
458
+ self,
459
+ input_ids,
460
+ attention_mask=None,
461
+ head_mask=None,
462
+ params: dict = None,
463
+ dropout_rng: jax.random.PRNGKey = None,
464
+ train: bool = False,
465
+ output_attentions: Optional[bool] = None,
466
+ output_hidden_states: Optional[bool] = None,
467
+ return_dict: Optional[bool] = None,
468
+ ):
469
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
470
+ output_hidden_states = (
471
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
472
+ )
473
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
474
+
475
+ if attention_mask is None:
476
+ attention_mask = jnp.ones_like(input_ids)
477
+
478
+ # Handle any PRNG if needed
479
+ rngs = {}
480
+ if dropout_rng is not None:
481
+ rngs["dropout"] = dropout_rng
482
+
483
+ return self.module.apply(
484
+ {"params": params or self.params},
485
+ jnp.array(input_ids, dtype="i4"),
486
+ jnp.array(attention_mask, dtype="i4"),
487
+ not train,
488
+ output_attentions,
489
+ output_hidden_states,
490
+ return_dict,
491
+ rngs=rngs,
492
+ )
493
+
494
+
495
+ class FlaxDistilBertModule(nn.Module):
496
+ config: DistilBertConfig
497
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
498
+
499
+ def setup(self):
500
+ self.embeddings = FlaxEmbeddings(self.config, dtype=self.dtype)
501
+ self.transformer = FlaxTransformerEncoder(self.config, dtype=self.dtype)
502
+
503
+ def __call__(
504
+ self,
505
+ input_ids,
506
+ attention_mask,
507
+ deterministic: bool = True,
508
+ output_attentions: bool = False,
509
+ output_hidden_states: bool = False,
510
+ return_dict: bool = True,
511
+ ):
512
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
513
+ output_hidden_states = (
514
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
515
+ )
516
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
517
+
518
+ input_embeds = self.embeddings(input_ids, deterministic=deterministic)
519
+ return self.transformer(
520
+ hidden_states=input_embeds,
521
+ attention_mask=attention_mask,
522
+ deterministic=deterministic,
523
+ output_attentions=output_attentions,
524
+ output_hidden_states=output_hidden_states,
525
+ return_dict=return_dict,
526
+ )
527
+
528
+
529
+ @add_start_docstrings(
530
+ "The bare DistilBert Model transformer outputting raw hidden-states without any specific head on top.",
531
+ FLAX_DISTILBERT_START_DOCSTRING,
532
+ )
533
+ class FlaxDistilBertModel(FlaxDistilBertPreTrainedModel):
534
+ module_class = FlaxDistilBertModule
535
+
536
+
537
+ append_call_sample_docstring(FlaxDistilBertModel, _CHECKPOINT_FOR_DOC, None, _CONFIG_FOR_DOC)
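A minimal usage sketch for the bare Flax model defined above, assuming the `distilbert-base-uncased` checkpoint referenced by `_CHECKPOINT_FOR_DOC` ships Flax weights (otherwise `from_pt=True` can be passed to `from_pretrained`):

```python
from transformers import AutoTokenizer, FlaxDistilBertModel

tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased")
model = FlaxDistilBertModel.from_pretrained("distilbert-base-uncased")

# Flax models take NumPy/JAX arrays, hence return_tensors="np"
inputs = tokenizer("Hello, my dog is cute", return_tensors="np")
outputs = model(**inputs)
last_hidden_state = outputs.last_hidden_state  # (1, seq_len, config.dim)
```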
538
+
539
+
540
+ class FlaxDistilBertForMaskedLMModule(nn.Module):
541
+ config: DistilBertConfig
542
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
543
+
544
+ def setup(self):
545
+ self.distilbert = FlaxDistilBertModule(self.config, dtype=self.dtype)
546
+ self.vocab_transform = nn.Dense(
547
+ self.config.dim,
548
+ dtype=self.dtype,
549
+ kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
550
+ )
551
+ self.vocab_layer_norm = nn.LayerNorm(epsilon=1e-12, dtype=self.dtype)
552
+ if self.config.tie_word_embeddings:
553
+ self.vocab_projector = FlaxDistilBertLMDecoder(
554
+ self.config,
555
+ dtype=self.dtype,
556
+ )
557
+ else:
558
+ self.vocab_projector = nn.Dense(
559
+ self.config.vocab_size,
560
+ dtype=self.dtype,
561
+ kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
562
+ )
563
+
564
+ def __call__(
565
+ self,
566
+ input_ids,
567
+ attention_mask,
568
+ deterministic: bool = True,
569
+ output_attentions: bool = False,
570
+ output_hidden_states: bool = False,
571
+ return_dict: bool = True,
572
+ ):
573
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
574
+
575
+ dlbrt_output = self.distilbert(
576
+ input_ids=input_ids,
577
+ attention_mask=attention_mask,
578
+ output_attentions=output_attentions,
579
+ output_hidden_states=output_hidden_states,
580
+ deterministic=deterministic,
581
+ return_dict=return_dict,
582
+ )
583
+ hidden_states = dlbrt_output[0]
584
+ prediction_logits = self.vocab_transform(hidden_states)
585
+ prediction_logits = ACT2FN[self.config.activation](prediction_logits)
586
+ prediction_logits = self.vocab_layer_norm(prediction_logits)
587
+
588
+ if self.config.tie_word_embeddings:
589
+ shared_embedding = self.distilbert.variables["params"]["embeddings"]["word_embeddings"]["embedding"]
590
+ prediction_logits = self.vocab_projector(prediction_logits, shared_embedding.T)
591
+ else:
592
+ prediction_logits = self.vocab_projector(prediction_logits)
593
+
594
+ if not return_dict:
595
+ output = (prediction_logits,) + dlbrt_output[1:]
596
+ return output
597
+
598
+ return FlaxMaskedLMOutput(
599
+ logits=prediction_logits,
600
+ hidden_states=dlbrt_output.hidden_states,
601
+ attentions=dlbrt_output.attentions,
602
+ )
603
+
604
+
605
+ @add_start_docstrings("""DistilBert Model with a `language modeling` head on top.""", FLAX_DISTILBERT_START_DOCSTRING)
606
+ class FlaxDistilBertForMaskedLM(FlaxDistilBertPreTrainedModel):
607
+ module_class = FlaxDistilBertForMaskedLMModule
608
+
609
+
610
+ append_call_sample_docstring(FlaxDistilBertForMaskedLM, _CHECKPOINT_FOR_DOC, FlaxMaskedLMOutput, _CONFIG_FOR_DOC)
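A hedged sketch of the masked-LM head in use; it assumes the same `distilbert-base-uncased` checkpoint, whose pretraining head matches this module, so the prediction at the mask position is meaningful:

```python
from transformers import AutoTokenizer, FlaxDistilBertForMaskedLM

tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased")
model = FlaxDistilBertForMaskedLM.from_pretrained("distilbert-base-uncased")

inputs = tokenizer("The capital of France is [MASK].", return_tensors="np")
logits = model(**inputs).logits  # (1, seq_len, vocab_size)

# Pick the most likely token at the [MASK] position
mask_index = int((inputs["input_ids"][0] == tokenizer.mask_token_id).argmax())
predicted_id = int(logits[0, mask_index].argmax(-1))
print(tokenizer.decode([predicted_id]))
```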
611
+
612
+
613
+ class FlaxDistilBertForSequenceClassificationModule(nn.Module):
614
+ config: DistilBertConfig
615
+ dtype: jnp.dtype = jnp.float32
616
+
617
+ def setup(self):
618
+ self.distilbert = FlaxDistilBertModule(config=self.config, dtype=self.dtype)
619
+ self.pre_classifier = nn.Dense(
620
+ self.config.dim,
621
+ dtype=self.dtype,
622
+ kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
623
+ )
624
+ self.dropout = nn.Dropout(rate=self.config.seq_classif_dropout)
625
+ self.classifier = nn.Dense(
626
+ self.config.num_labels,
627
+ dtype=self.dtype,
628
+ )
629
+
630
+ def __call__(
631
+ self,
632
+ input_ids,
633
+ attention_mask,
634
+ deterministic: bool = True,
635
+ output_attentions: bool = False,
636
+ output_hidden_states: bool = False,
637
+ return_dict: bool = True,
638
+ ):
639
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
640
+ # Model
641
+ distilbert_output = self.distilbert(
642
+ input_ids,
643
+ attention_mask,
644
+ deterministic=deterministic,
645
+ output_attentions=output_attentions,
646
+ output_hidden_states=output_hidden_states,
647
+ return_dict=return_dict,
648
+ )
649
+ hidden_state = distilbert_output[0] # (bs, seq_len, dim)
650
+ pooled_output = hidden_state[:, 0] # (bs, dim)
651
+ pooled_output = self.pre_classifier(pooled_output) # (bs, dim)
652
+ pooled_output = ACT2FN["relu"](pooled_output)
653
+ pooled_output = self.dropout(pooled_output, deterministic=deterministic)
654
+ logits = self.classifier(pooled_output) # (bs, num_labels)
655
+
656
+ if not return_dict:
657
+ return (logits,) + distilbert_output[1:]
658
+
659
+ return FlaxSequenceClassifierOutput(
660
+ logits=logits,
661
+ hidden_states=distilbert_output.hidden_states,
662
+ attentions=distilbert_output.attentions,
663
+ )
664
+
665
+
666
+ @add_start_docstrings(
667
+ """
668
+ DistilBert Model transformer with a sequence classification/regression head on top (a linear layer on top of the
669
+ pooled output) e.g. for GLUE tasks.
670
+ """,
671
+ FLAX_DISTILBERT_START_DOCSTRING,
672
+ )
673
+ class FlaxDistilBertForSequenceClassification(FlaxDistilBertPreTrainedModel):
674
+ module_class = FlaxDistilBertForSequenceClassificationModule
675
+
676
+
677
+ append_call_sample_docstring(
678
+ FlaxDistilBertForSequenceClassification,
679
+ _CHECKPOINT_FOR_DOC,
680
+ FlaxSequenceClassifierOutput,
681
+ _CONFIG_FOR_DOC,
682
+ )
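A minimal sketch for the sequence-classification head defined above; note that with the base `distilbert-base-uncased` checkpoint the `pre_classifier`/`classifier` weights are freshly initialized, so the logits are only meaningful after fine-tuning:

```python
from transformers import AutoTokenizer, FlaxDistilBertForSequenceClassification

tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased")
model = FlaxDistilBertForSequenceClassification.from_pretrained(
    "distilbert-base-uncased", num_labels=2
)

inputs = tokenizer("This movie was great!", return_tensors="np")
logits = model(**inputs).logits  # (1, num_labels)
```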
683
+
684
+
685
+ class FlaxDistilBertForMultipleChoiceModule(nn.Module):
686
+ config: DistilBertConfig
687
+ dtype: jnp.dtype = jnp.float32
688
+
689
+ def setup(self):
690
+ self.distilbert = FlaxDistilBertModule(config=self.config, dtype=self.dtype)
691
+ self.pre_classifier = nn.Dense(
692
+ self.config.dim,
693
+ dtype=self.dtype,
694
+ kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
695
+ )
696
+ self.dropout = nn.Dropout(rate=self.config.seq_classif_dropout)
697
+ self.classifier = nn.Dense(
698
+ 1,
699
+ dtype=self.dtype,
700
+ )
701
+
702
+ def __call__(
703
+ self,
704
+ input_ids,
705
+ attention_mask,
706
+ deterministic: bool = True,
707
+ output_attentions: bool = False,
708
+ output_hidden_states: bool = False,
709
+ return_dict: bool = True,
710
+ ):
711
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
712
+ num_choices = input_ids.shape[1]
713
+ input_ids = input_ids.reshape(-1, input_ids.shape[-1]) if input_ids is not None else None
714
+ attention_mask = attention_mask.reshape(-1, attention_mask.shape[-1]) if attention_mask is not None else None
715
+
716
+ # Model
717
+ outputs = self.distilbert(
718
+ input_ids,
719
+ attention_mask,
720
+ deterministic=deterministic,
721
+ output_attentions=output_attentions,
722
+ output_hidden_states=output_hidden_states,
723
+ return_dict=return_dict,
724
+ )
725
+
726
+ hidden_state = outputs[0]
727
+ pooled_output = hidden_state[:, 0]
728
+ pooled_output = self.pre_classifier(pooled_output)
729
+ pooled_output = ACT2FN["relu"](pooled_output)
730
+ pooled_output = self.dropout(pooled_output, deterministic=deterministic)
731
+ logits = self.classifier(pooled_output)
732
+
733
+ reshaped_logits = logits.reshape(-1, num_choices)
734
+
735
+ if not return_dict:
736
+ return (reshaped_logits,) + outputs[2:]
737
+
738
+ return FlaxMultipleChoiceModelOutput(
739
+ logits=reshaped_logits,
740
+ hidden_states=outputs.hidden_states,
741
+ attentions=outputs.attentions,
742
+ )
743
+
744
+
745
+ @add_start_docstrings(
746
+ """
747
+ DistilBert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and
748
+ a softmax) e.g. for RocStories/SWAG tasks.
749
+ """,
750
+ FLAX_DISTILBERT_START_DOCSTRING,
751
+ )
752
+ class FlaxDistilBertForMultipleChoice(FlaxDistilBertPreTrainedModel):
753
+ module_class = FlaxDistilBertForMultipleChoiceModule
754
+
755
+
756
+ overwrite_call_docstring(
757
+ FlaxDistilBertForMultipleChoice, DISTILBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")
758
+ )
759
+ append_call_sample_docstring(
760
+ FlaxDistilBertForMultipleChoice,
761
+ _CHECKPOINT_FOR_DOC,
762
+ FlaxMultipleChoiceModelOutput,
763
+ _CONFIG_FOR_DOC,
764
+ )
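A sketch of the multiple-choice head, mirroring the PyTorch docstring example earlier in this diff; the extra leading axis gives the `(batch_size, num_choices, sequence_length)` layout that the module reshapes internally:

```python
import numpy as np
from transformers import AutoTokenizer, FlaxDistilBertForMultipleChoice

tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased")
model = FlaxDistilBertForMultipleChoice.from_pretrained("distilbert-base-uncased")

prompt = "In Italy, pizza served in formal settings is presented unsliced."
choices = ["It is eaten with a fork and a knife.", "It is eaten while held in the hand."]

encoding = tokenizer([prompt, prompt], choices, return_tensors="np", padding=True)
inputs = {k: np.expand_dims(v, 0) for k, v in encoding.items()}  # (1, 2, seq_len)
logits = model(**inputs).logits  # (1, num_choices); head untrained for the base checkpoint
```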
765
+
766
+
767
+ class FlaxDistilBertForTokenClassificationModule(nn.Module):
768
+ config: DistilBertConfig
769
+ dtype: jnp.dtype = jnp.float32
770
+
771
+ def setup(self):
772
+ self.distilbert = FlaxDistilBertModule(config=self.config, dtype=self.dtype)
773
+ self.dropout = nn.Dropout(rate=self.config.dropout)
774
+ self.classifier = nn.Dense(self.config.num_labels, dtype=self.dtype)
775
+
776
+ def __call__(
777
+ self,
778
+ input_ids,
779
+ attention_mask,
780
+ deterministic: bool = True,
781
+ output_attentions: bool = False,
782
+ output_hidden_states: bool = False,
783
+ return_dict: bool = True,
784
+ ):
785
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
786
+ # Model
787
+ outputs = self.distilbert(
788
+ input_ids,
789
+ attention_mask,
790
+ deterministic=deterministic,
791
+ output_attentions=output_attentions,
792
+ output_hidden_states=output_hidden_states,
793
+ return_dict=return_dict,
794
+ )
795
+
796
+ hidden_states = outputs[0]
797
+ hidden_states = self.dropout(hidden_states, deterministic=deterministic)
798
+ logits = self.classifier(hidden_states)
799
+
800
+ if not return_dict:
801
+ return (logits,) + outputs[1:]
802
+
803
+ return FlaxTokenClassifierOutput(
804
+ logits=logits,
805
+ hidden_states=outputs.hidden_states,
806
+ attentions=outputs.attentions,
807
+ )
808
+
809
+
810
+ @add_start_docstrings(
811
+ """
812
+ DistilBert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g.
813
+ for Named-Entity-Recognition (NER) tasks.
814
+ """,
815
+ FLAX_DISTILBERT_START_DOCSTRING,
816
+ )
817
+ class FlaxDistilBertForTokenClassification(FlaxDistilBertPreTrainedModel):
818
+ module_class = FlaxDistilBertForTokenClassificationModule
819
+
820
+
821
+ append_call_sample_docstring(
822
+ FlaxDistilBertForTokenClassification,
823
+ _CHECKPOINT_FOR_DOC,
824
+ FlaxTokenClassifierOutput,
825
+ _CONFIG_FOR_DOC,
826
+ )
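The token-classification head applies dropout plus a per-token linear layer; a short sketch (again, the head is randomly initialized unless a fine-tuned NER checkpoint is loaded):

```python
from transformers import AutoTokenizer, FlaxDistilBertForTokenClassification

tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased")
model = FlaxDistilBertForTokenClassification.from_pretrained("distilbert-base-uncased")

inputs = tokenizer("Hugging Face is based in New York City", return_tensors="np")
logits = model(**inputs).logits    # (1, seq_len, num_labels)
predicted_ids = logits.argmax(-1)  # one label id per token
```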
827
+
828
+
829
+ class FlaxDistilBertForQuestionAnsweringModule(nn.Module):
830
+ config: DistilBertConfig
831
+ dtype: jnp.dtype = jnp.float32
832
+
833
+ def setup(self):
834
+ self.distilbert = FlaxDistilBertModule(config=self.config, dtype=self.dtype)
835
+ self.qa_outputs = nn.Dense(self.config.num_labels, dtype=self.dtype)
836
+ assert self.config.num_labels == 2
837
+ self.dropout = nn.Dropout(rate=self.config.qa_dropout)
838
+
839
+ def __call__(
840
+ self,
841
+ input_ids,
842
+ attention_mask,
843
+ deterministic: bool = True,
844
+ output_attentions: bool = False,
845
+ output_hidden_states: bool = False,
846
+ return_dict: bool = True,
847
+ ):
848
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
849
+
850
+ # Model
851
+ distilbert_output = self.distilbert(
852
+ input_ids,
853
+ attention_mask,
854
+ deterministic=deterministic,
855
+ output_attentions=output_attentions,
856
+ output_hidden_states=output_hidden_states,
857
+ return_dict=return_dict,
858
+ )
859
+
860
+ hidden_states = distilbert_output[0]
861
+
862
+ hidden_states = self.dropout(hidden_states, deterministic=deterministic)
863
+ logits = self.qa_outputs(hidden_states)
864
+ start_logits, end_logits = logits.split(self.config.num_labels, axis=-1)
865
+ start_logits = start_logits.squeeze(-1)
866
+ end_logits = end_logits.squeeze(-1)
867
+
868
+ if not return_dict:
869
+ return (start_logits, end_logits) + distilbert_output[1:]
870
+
871
+ return FlaxQuestionAnsweringModelOutput(
872
+ start_logits=start_logits,
873
+ end_logits=end_logits,
874
+ hidden_states=distilbert_output.hidden_states,
875
+ attentions=distilbert_output.attentions,
876
+ )
877
+
878
+
879
+ @add_start_docstrings(
880
+ """
881
+ DistilBert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a
882
+ linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
883
+ """,
884
+ FLAX_DISTILBERT_START_DOCSTRING,
885
+ )
886
+ class FlaxDistilBertForQuestionAnswering(FlaxDistilBertPreTrainedModel):
887
+ module_class = FlaxDistilBertForQuestionAnsweringModule
888
+
889
+
890
+ append_call_sample_docstring(
891
+ FlaxDistilBertForQuestionAnswering,
892
+ _CHECKPOINT_FOR_DOC,
893
+ FlaxQuestionAnsweringModelOutput,
894
+ _CONFIG_FOR_DOC,
895
+ )
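A sketch of the span-extraction head: start and end logits are argmax-ed and the token span decoded. For meaningful answers a SQuAD-fine-tuned checkpoint would be needed (the base checkpoint leaves `qa_outputs` untrained), so the checkpoint name below is only illustrative:

```python
from transformers import AutoTokenizer, FlaxDistilBertForQuestionAnswering

checkpoint = "distilbert-base-uncased"  # swap for a SQuAD-fine-tuned checkpoint in practice
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = FlaxDistilBertForQuestionAnswering.from_pretrained(checkpoint)

question, context = "Where is the Eiffel Tower?", "The Eiffel Tower is located in Paris."
inputs = tokenizer(question, context, return_tensors="np")
outputs = model(**inputs)

start = int(outputs.start_logits.argmax(-1))
end = int(outputs.end_logits.argmax(-1))
answer = tokenizer.decode(inputs["input_ids"][0][start : end + 1])
```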
llmeval-env/lib/python3.10/site-packages/transformers/models/distilbert/modeling_tf_distilbert.py ADDED
@@ -0,0 +1,1139 @@
1
+ # coding=utf-8
2
+ # Copyright 2019-present, the HuggingFace Inc. team, The Google AI Language Team and Facebook, Inc.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """
16
+ TF 2.0 DistilBERT model
17
+ """
18
+
19
+
20
+ from __future__ import annotations
21
+
22
+ import warnings
23
+ from typing import Optional, Tuple, Union
24
+
25
+ import numpy as np
26
+ import tensorflow as tf
27
+
28
+ from ...activations_tf import get_tf_activation
29
+ from ...modeling_tf_outputs import (
30
+ TFBaseModelOutput,
31
+ TFMaskedLMOutput,
32
+ TFMultipleChoiceModelOutput,
33
+ TFQuestionAnsweringModelOutput,
34
+ TFSequenceClassifierOutput,
35
+ TFTokenClassifierOutput,
36
+ )
37
+ from ...modeling_tf_utils import (
38
+ TFMaskedLanguageModelingLoss,
39
+ TFModelInputType,
40
+ TFMultipleChoiceLoss,
41
+ TFPreTrainedModel,
42
+ TFQuestionAnsweringLoss,
43
+ TFSequenceClassificationLoss,
44
+ TFTokenClassificationLoss,
45
+ get_initializer,
46
+ keras,
47
+ keras_serializable,
48
+ unpack_inputs,
49
+ )
50
+ from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax
51
+ from ...utils import (
52
+ add_code_sample_docstrings,
53
+ add_start_docstrings,
54
+ add_start_docstrings_to_model_forward,
55
+ logging,
56
+ )
57
+ from .configuration_distilbert import DistilBertConfig
58
+
59
+
60
+ logger = logging.get_logger(__name__)
61
+
62
+ _CHECKPOINT_FOR_DOC = "distilbert-base-uncased"
63
+ _CONFIG_FOR_DOC = "DistilBertConfig"
64
+
65
+
66
+ from ..deprecated._archive_maps import TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
67
+
68
+
69
+ class TFEmbeddings(keras.layers.Layer):
70
+ """Construct the embeddings from word, position and token_type embeddings."""
71
+
72
+ def __init__(self, config, **kwargs):
73
+ super().__init__(**kwargs)
74
+ self.config = config
75
+ self.dim = config.dim
76
+ self.initializer_range = config.initializer_range
77
+ self.max_position_embeddings = config.max_position_embeddings
78
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=1e-12, name="LayerNorm")
79
+ self.dropout = keras.layers.Dropout(rate=config.dropout)
80
+
81
+ def build(self, input_shape=None):
82
+ with tf.name_scope("word_embeddings"):
83
+ self.weight = self.add_weight(
84
+ name="weight",
85
+ shape=[self.config.vocab_size, self.dim],
86
+ initializer=get_initializer(initializer_range=self.initializer_range),
87
+ )
88
+
89
+ with tf.name_scope("position_embeddings"):
90
+ self.position_embeddings = self.add_weight(
91
+ name="embeddings",
92
+ shape=[self.max_position_embeddings, self.dim],
93
+ initializer=get_initializer(initializer_range=self.initializer_range),
94
+ )
95
+
96
+ if self.built:
97
+ return
98
+ self.built = True
99
+ if getattr(self, "LayerNorm", None) is not None:
100
+ with tf.name_scope(self.LayerNorm.name):
101
+ self.LayerNorm.build([None, None, self.config.dim])
102
+
103
+ def call(self, input_ids=None, position_ids=None, inputs_embeds=None, training=False):
104
+ """
105
+ Applies embedding based on inputs tensor.
106
+
107
+ Returns:
108
+ final_embeddings (`tf.Tensor`): output embedding tensor.
109
+ """
110
+ assert not (input_ids is None and inputs_embeds is None)
111
+
112
+ if input_ids is not None:
113
+ check_embeddings_within_bounds(input_ids, self.config.vocab_size)
114
+ inputs_embeds = tf.gather(params=self.weight, indices=input_ids)
115
+
116
+ input_shape = shape_list(inputs_embeds)[:-1]
117
+
118
+ if position_ids is None:
119
+ position_ids = tf.expand_dims(tf.range(start=0, limit=input_shape[-1]), axis=0)
120
+
121
+ position_embeds = tf.gather(params=self.position_embeddings, indices=position_ids)
122
+ final_embeddings = inputs_embeds + position_embeds
123
+ final_embeddings = self.LayerNorm(inputs=final_embeddings)
124
+ final_embeddings = self.dropout(inputs=final_embeddings, training=training)
125
+
126
+ return final_embeddings
127
+
128
+
129
+ class TFMultiHeadSelfAttention(keras.layers.Layer):
130
+ def __init__(self, config, **kwargs):
131
+ super().__init__(**kwargs)
132
+
133
+ self.n_heads = config.n_heads
134
+ self.dim = config.dim
135
+ self.dropout = keras.layers.Dropout(config.attention_dropout)
136
+ self.output_attentions = config.output_attentions
137
+
138
+ assert self.dim % self.n_heads == 0, f"Hidden size {self.dim} not divisible by number of heads {self.n_heads}"
139
+
140
+ self.q_lin = keras.layers.Dense(
141
+ config.dim, kernel_initializer=get_initializer(config.initializer_range), name="q_lin"
142
+ )
143
+ self.k_lin = keras.layers.Dense(
144
+ config.dim, kernel_initializer=get_initializer(config.initializer_range), name="k_lin"
145
+ )
146
+ self.v_lin = keras.layers.Dense(
147
+ config.dim, kernel_initializer=get_initializer(config.initializer_range), name="v_lin"
148
+ )
149
+ self.out_lin = keras.layers.Dense(
150
+ config.dim, kernel_initializer=get_initializer(config.initializer_range), name="out_lin"
151
+ )
152
+
153
+ self.pruned_heads = set()
154
+ self.config = config
155
+
156
+ def prune_heads(self, heads):
157
+ raise NotImplementedError
158
+
159
+ def call(self, query, key, value, mask, head_mask, output_attentions, training=False):
160
+ """
161
+ Parameters:
162
+ query: tf.Tensor(bs, seq_length, dim)
163
+ key: tf.Tensor(bs, seq_length, dim)
164
+ value: tf.Tensor(bs, seq_length, dim)
165
+ mask: tf.Tensor(bs, seq_length)
166
+
167
+ Returns:
168
+ weights: tf.Tensor(bs, n_heads, seq_length, seq_length) Attention weights. Optional: only if `output_attentions=True`
169
+ context: tf.Tensor(bs, seq_length, dim) Contextualized layer.
170
+ """
171
+ bs, q_length, dim = shape_list(query)
172
+ k_length = shape_list(key)[1]
173
+ # assert dim == self.dim, f'Dimensions do not match: {dim} input vs {self.dim} configured'
174
+ # assert key.size() == value.size()
175
+ dim_per_head = int(self.dim / self.n_heads)
176
+ dim_per_head = tf.cast(dim_per_head, dtype=tf.int32)
177
+ mask_reshape = [bs, 1, 1, k_length]
178
+
179
+ def shape(x):
180
+ """separate heads"""
181
+ return tf.transpose(tf.reshape(x, (bs, -1, self.n_heads, dim_per_head)), perm=(0, 2, 1, 3))
182
+
183
+ def unshape(x):
184
+ """group heads"""
185
+ return tf.reshape(tf.transpose(x, perm=(0, 2, 1, 3)), (bs, -1, self.n_heads * dim_per_head))
186
+
187
+ q = shape(self.q_lin(query)) # (bs, n_heads, q_length, dim_per_head)
188
+ k = shape(self.k_lin(key)) # (bs, n_heads, k_length, dim_per_head)
189
+ v = shape(self.v_lin(value)) # (bs, n_heads, k_length, dim_per_head)
190
+ q = tf.cast(q, dtype=tf.float32)
191
+ q = tf.multiply(q, tf.math.rsqrt(tf.cast(dim_per_head, dtype=tf.float32)))
192
+ k = tf.cast(k, dtype=q.dtype)
193
+ scores = tf.matmul(q, k, transpose_b=True) # (bs, n_heads, q_length, k_length)
194
+ mask = tf.reshape(mask, mask_reshape) # (bs, n_heads, qlen, klen)
195
+ # scores.masked_fill_(mask, -float('inf')) # (bs, n_heads, q_length, k_length)
196
+
197
+ mask = tf.cast(mask, dtype=scores.dtype)
198
+ scores = scores - 1e30 * (1.0 - mask)
199
+ weights = stable_softmax(scores, axis=-1) # (bs, n_heads, qlen, klen)
200
+ weights = self.dropout(weights, training=training) # (bs, n_heads, qlen, klen)
201
+
202
+ # Mask heads if we want to
203
+ if head_mask is not None:
204
+ weights = weights * head_mask
205
+
206
+ context = tf.matmul(weights, v) # (bs, n_heads, qlen, dim_per_head)
207
+ context = unshape(context) # (bs, q_length, dim)
208
+ context = self.out_lin(context) # (bs, q_length, dim)
209
+
210
+ if output_attentions:
211
+ return (context, weights)
212
+ else:
213
+ return (context,)
214
+
215
+ def build(self, input_shape=None):
216
+ if self.built:
217
+ return
218
+ self.built = True
219
+ if getattr(self, "q_lin", None) is not None:
220
+ with tf.name_scope(self.q_lin.name):
221
+ self.q_lin.build([None, None, self.config.dim])
222
+ if getattr(self, "k_lin", None) is not None:
223
+ with tf.name_scope(self.k_lin.name):
224
+ self.k_lin.build([None, None, self.config.dim])
225
+ if getattr(self, "v_lin", None) is not None:
226
+ with tf.name_scope(self.v_lin.name):
227
+ self.v_lin.build([None, None, self.config.dim])
228
+ if getattr(self, "out_lin", None) is not None:
229
+ with tf.name_scope(self.out_lin.name):
230
+ self.out_lin.build([None, None, self.config.dim])
231
+
232
+
233
+ class TFFFN(keras.layers.Layer):
234
+ def __init__(self, config, **kwargs):
235
+ super().__init__(**kwargs)
236
+ self.dropout = keras.layers.Dropout(config.dropout)
237
+ self.lin1 = keras.layers.Dense(
238
+ config.hidden_dim, kernel_initializer=get_initializer(config.initializer_range), name="lin1"
239
+ )
240
+ self.lin2 = keras.layers.Dense(
241
+ config.dim, kernel_initializer=get_initializer(config.initializer_range), name="lin2"
242
+ )
243
+ self.activation = get_tf_activation(config.activation)
244
+ self.config = config
245
+
246
+ def call(self, input, training=False):
247
+ x = self.lin1(input)
248
+ x = self.activation(x)
249
+ x = self.lin2(x)
250
+ x = self.dropout(x, training=training)
251
+ return x
252
+
253
+ def build(self, input_shape=None):
254
+ if self.built:
255
+ return
256
+ self.built = True
257
+ if getattr(self, "lin1", None) is not None:
258
+ with tf.name_scope(self.lin1.name):
259
+ self.lin1.build([None, None, self.config.dim])
260
+ if getattr(self, "lin2", None) is not None:
261
+ with tf.name_scope(self.lin2.name):
262
+ self.lin2.build([None, None, self.config.hidden_dim])
263
+
264
+
265
+ class TFTransformerBlock(keras.layers.Layer):
266
+ def __init__(self, config, **kwargs):
267
+ super().__init__(**kwargs)
268
+
269
+ self.n_heads = config.n_heads
270
+ self.dim = config.dim
271
+ self.hidden_dim = config.hidden_dim
272
+ self.dropout = keras.layers.Dropout(config.dropout)
273
+ self.activation = config.activation
274
+ self.output_attentions = config.output_attentions
275
+
276
+ assert (
277
+ config.dim % config.n_heads == 0
278
+ ), f"Hidden size {config.dim} not dividable by number of heads {config.n_heads}"
279
+
280
+ self.attention = TFMultiHeadSelfAttention(config, name="attention")
281
+ self.sa_layer_norm = keras.layers.LayerNormalization(epsilon=1e-12, name="sa_layer_norm")
282
+
283
+ self.ffn = TFFFN(config, name="ffn")
284
+ self.output_layer_norm = keras.layers.LayerNormalization(epsilon=1e-12, name="output_layer_norm")
285
+ self.config = config
286
+
287
+ def call(self, x, attn_mask, head_mask, output_attentions, training=False): # removed: src_enc=None, src_len=None
288
+ """
289
+ Parameters:
290
+ x: tf.Tensor(bs, seq_length, dim)
291
+ attn_mask: tf.Tensor(bs, seq_length)
292
+
293
+ Outputs: sa_weights: tf.Tensor(bs, n_heads, seq_length, seq_length) The attention weights.
294
+ ffn_output: tf.Tensor(bs, seq_length, dim) The output of the transformer block contextualization.
295
+ """
296
+ # Self-Attention
297
+ sa_output = self.attention(x, x, x, attn_mask, head_mask, output_attentions, training=training)
298
+ if output_attentions:
299
+ sa_output, sa_weights = sa_output # (bs, seq_length, dim), (bs, n_heads, seq_length, seq_length)
300
+ else: # To handle these `output_attentions` or `output_hidden_states` cases returning tuples
301
+ # assert type(sa_output) == tuple
302
+ sa_output = sa_output[0]
303
+ sa_output = self.sa_layer_norm(sa_output + x) # (bs, seq_length, dim)
304
+
305
+ # Feed Forward Network
306
+ ffn_output = self.ffn(sa_output, training=training) # (bs, seq_length, dim)
307
+ ffn_output = self.output_layer_norm(ffn_output + sa_output) # (bs, seq_length, dim)
308
+
309
+ output = (ffn_output,)
310
+ if output_attentions:
311
+ output = (sa_weights,) + output
312
+ return output
313
+
314
+ def build(self, input_shape=None):
315
+ if self.built:
316
+ return
317
+ self.built = True
318
+ if getattr(self, "attention", None) is not None:
319
+ with tf.name_scope(self.attention.name):
320
+ self.attention.build(None)
321
+ if getattr(self, "sa_layer_norm", None) is not None:
322
+ with tf.name_scope(self.sa_layer_norm.name):
323
+ self.sa_layer_norm.build([None, None, self.config.dim])
324
+ if getattr(self, "ffn", None) is not None:
325
+ with tf.name_scope(self.ffn.name):
326
+ self.ffn.build(None)
327
+ if getattr(self, "output_layer_norm", None) is not None:
328
+ with tf.name_scope(self.output_layer_norm.name):
329
+ self.output_layer_norm.build([None, None, self.config.dim])
330
+
331
+
332
+ class TFTransformer(keras.layers.Layer):
333
+ def __init__(self, config, **kwargs):
334
+ super().__init__(**kwargs)
335
+ self.n_layers = config.n_layers
336
+ self.output_hidden_states = config.output_hidden_states
337
+ self.output_attentions = config.output_attentions
338
+
339
+ self.layer = [TFTransformerBlock(config, name=f"layer_._{i}") for i in range(config.n_layers)]
340
+
341
+ def call(self, x, attn_mask, head_mask, output_attentions, output_hidden_states, return_dict, training=False):
342
+ # docstyle-ignore
343
+ """
344
+ Parameters:
345
+ x: tf.Tensor(bs, seq_length, dim) Input sequence embedded.
346
+ attn_mask: tf.Tensor(bs, seq_length) Attention mask on the sequence.
347
+
348
+ Returns:
349
+ hidden_state: tf.Tensor(bs, seq_length, dim)
350
+ Sequence of hidden states in the last (top) layer
351
+ all_hidden_states: Tuple[tf.Tensor(bs, seq_length, dim)]
352
+ Tuple of length n_layers with the hidden states from each layer.
353
+ Optional: only if output_hidden_states=True
354
+ all_attentions: Tuple[tf.Tensor(bs, n_heads, seq_length, seq_length)]
355
+ Tuple of length n_layers with the attention weights from each layer
356
+ Optional: only if output_attentions=True
357
+ """
358
+ all_hidden_states = () if output_hidden_states else None
359
+ all_attentions = () if output_attentions else None
360
+
361
+ hidden_state = x
362
+ for i, layer_module in enumerate(self.layer):
363
+ if output_hidden_states:
364
+ all_hidden_states = all_hidden_states + (hidden_state,)
365
+
366
+ layer_outputs = layer_module(hidden_state, attn_mask, head_mask[i], output_attentions, training=training)
367
+ hidden_state = layer_outputs[-1]
368
+
369
+ if output_attentions:
370
+ assert len(layer_outputs) == 2
371
+ attentions = layer_outputs[0]
372
+ all_attentions = all_attentions + (attentions,)
373
+ else:
374
+ assert len(layer_outputs) == 1, f"Incorrect number of outputs {len(layer_outputs)} instead of 1"
375
+
376
+ # Add last layer
377
+ if output_hidden_states:
378
+ all_hidden_states = all_hidden_states + (hidden_state,)
379
+
380
+ if not return_dict:
381
+ return tuple(v for v in [hidden_state, all_hidden_states, all_attentions] if v is not None)
382
+ return TFBaseModelOutput(
383
+ last_hidden_state=hidden_state, hidden_states=all_hidden_states, attentions=all_attentions
384
+ )
385
+
386
+ def build(self, input_shape=None):
387
+ if self.built:
388
+ return
389
+ self.built = True
390
+ if getattr(self, "layer", None) is not None:
391
+ for layer in self.layer:
392
+ with tf.name_scope(layer.name):
393
+ layer.build(None)
394
+
395
+
396
+ @keras_serializable
397
+ class TFDistilBertMainLayer(keras.layers.Layer):
398
+ config_class = DistilBertConfig
399
+
400
+ def __init__(self, config, **kwargs):
401
+ super().__init__(**kwargs)
402
+
403
+ self.config = config
404
+ self.num_hidden_layers = config.num_hidden_layers
405
+ self.output_attentions = config.output_attentions
406
+ self.output_hidden_states = config.output_hidden_states
407
+ self.return_dict = config.use_return_dict
408
+
409
+ self.embeddings = TFEmbeddings(config, name="embeddings") # Embeddings
410
+ self.transformer = TFTransformer(config, name="transformer") # Encoder
411
+
412
+ def get_input_embeddings(self):
413
+ return self.embeddings
414
+
415
+ def set_input_embeddings(self, value):
416
+ self.embeddings.weight = value
417
+ self.embeddings.vocab_size = value.shape[0]
418
+
419
+ def _prune_heads(self, heads_to_prune):
420
+ raise NotImplementedError
421
+
422
+ @unpack_inputs
423
+ def call(
424
+ self,
425
+ input_ids=None,
426
+ attention_mask=None,
427
+ head_mask=None,
428
+ inputs_embeds=None,
429
+ output_attentions=None,
430
+ output_hidden_states=None,
431
+ return_dict=None,
432
+ training=False,
433
+ ):
434
+ if input_ids is not None and inputs_embeds is not None:
435
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
436
+ elif input_ids is not None:
437
+ input_shape = shape_list(input_ids)
438
+ elif inputs_embeds is not None:
439
+ input_shape = shape_list(inputs_embeds)[:-1]
440
+ else:
441
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
442
+
443
+ if attention_mask is None:
444
+ attention_mask = tf.ones(input_shape) # (bs, seq_length)
445
+
446
+ attention_mask = tf.cast(attention_mask, dtype=tf.float32)
447
+
448
+ # Prepare head mask if needed
449
+ # 1.0 in head_mask indicate we keep the head
450
+ # attention_probs has shape bsz x n_heads x N x N
451
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
452
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
453
+ if head_mask is not None:
454
+ raise NotImplementedError
455
+ else:
456
+ head_mask = [None] * self.num_hidden_layers
457
+
458
+ embedding_output = self.embeddings(input_ids, inputs_embeds=inputs_embeds) # (bs, seq_length, dim)
459
+ tfmr_output = self.transformer(
460
+ embedding_output,
461
+ attention_mask,
462
+ head_mask,
463
+ output_attentions,
464
+ output_hidden_states,
465
+ return_dict,
466
+ training=training,
467
+ )
468
+
469
+ return tfmr_output # last-layer hidden-state, (all hidden_states), (all attentions)
470
+
471
+ def build(self, input_shape=None):
472
+ if self.built:
473
+ return
474
+ self.built = True
475
+ if getattr(self, "embeddings", None) is not None:
476
+ with tf.name_scope(self.embeddings.name):
477
+ self.embeddings.build(None)
478
+ if getattr(self, "transformer", None) is not None:
479
+ with tf.name_scope(self.transformer.name):
480
+ self.transformer.build(None)
481
+
482
+
483
+ # INTERFACE FOR ENCODER AND TASK SPECIFIC MODEL #
484
+ class TFDistilBertPreTrainedModel(TFPreTrainedModel):
485
+ """
486
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
487
+ models.
488
+ """
489
+
490
+ config_class = DistilBertConfig
491
+ base_model_prefix = "distilbert"
492
+
493
+
494
+ DISTILBERT_START_DOCSTRING = r"""
495
+
496
+ This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
497
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
498
+ etc.)
499
+
500
+ This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
501
+ as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matters related to general usage and
502
+ behavior.
503
+
504
+ <Tip>
505
+
506
+ TensorFlow models and layers in `transformers` accept two formats as input:
507
+
508
+ - having all inputs as keyword arguments (like PyTorch models), or
509
+ - having all inputs as a list, tuple or dict in the first positional argument.
510
+
511
+ The reason the second format is supported is that Keras methods prefer this format when passing inputs to models
512
+ and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just
513
+ pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second
514
+ format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with
515
+ the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first
516
+ positional argument:
517
+
518
+ - a single Tensor with `input_ids` only and nothing else: `model(input_ids)`
519
+ - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
520
+ `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
521
+ - a dictionary with one or several input Tensors associated to the input names given in the docstring:
522
+ `model({"input_ids": input_ids, "token_type_ids": token_type_ids})`
523
+
524
+ Note that when creating models and layers with
525
+ [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry
526
+ about any of this, as you can just pass inputs like you would to any other Python function!
527
+
528
+ </Tip>
529
+
530
+ Parameters:
531
+ config ([`DistilBertConfig`]): Model configuration class with all the parameters of the model.
532
+ Initializing with a config file does not load the weights associated with the model, only the
533
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
534
+ """
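+ # The three call conventions described in the tip above, as a minimal sketch
+ # (`input_ids` / `attention_mask` are assumed to be already-built tf.Tensors):
+ #
+ #     model(input_ids)                                                   # single tensor
+ #     model([input_ids, attention_mask])                                 # list, in docstring order
+ #     model({"input_ids": input_ids, "attention_mask": attention_mask})  # dict keyed by input name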
535
+
536
+ DISTILBERT_INPUTS_DOCSTRING = r"""
537
+ Args:
538
+ input_ids (`Numpy array` or `tf.Tensor` of shape `({0})`):
539
+ Indices of input sequence tokens in the vocabulary.
540
+
541
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and
542
+ [`PreTrainedTokenizer.encode`] for details.
543
+
544
+ [What are input IDs?](../glossary#input-ids)
545
+ attention_mask (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*):
546
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
547
+
548
+ - 1 for tokens that are **not masked**,
549
+ - 0 for tokens that are **masked**.
550
+
551
+ [What are attention masks?](../glossary#attention-mask)
552
+ head_mask (`Numpy array` or `tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
553
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
554
+
555
+ - 1 indicates the head is **not masked**,
556
+ - 0 indicates the head is **masked**.
557
+
558
+ inputs_embeds (`tf.Tensor` of shape `({0}, hidden_size)`, *optional*):
559
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
560
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
561
+ model's internal embedding lookup matrix.
562
+ output_attentions (`bool`, *optional*):
563
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
564
+ tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the
565
+ config will be used instead.
566
+ output_hidden_states (`bool`, *optional*):
567
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
568
+ more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
569
+ used instead.
570
+ return_dict (`bool`, *optional*):
571
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in
572
+ eager mode, in graph mode the value will always be set to True.
573
+ training (`bool`, *optional*, defaults to `False`):
574
+ Whether or not to use the model in training mode (some modules like dropout modules have different
575
+ behaviors between training and evaluation).
576
+ """
577
+
578
+
579
+ @add_start_docstrings(
580
+ "The bare DistilBERT encoder/transformer outputting raw hidden-states without any specific head on top.",
581
+ DISTILBERT_START_DOCSTRING,
582
+ )
583
+ class TFDistilBertModel(TFDistilBertPreTrainedModel):
584
+ def __init__(self, config, *inputs, **kwargs):
585
+ super().__init__(config, *inputs, **kwargs)
586
+ self.distilbert = TFDistilBertMainLayer(config, name="distilbert") # Main DistilBERT layer (embeddings + transformer)
587
+
588
+ @unpack_inputs
589
+ @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
590
+ @add_code_sample_docstrings(
591
+ checkpoint=_CHECKPOINT_FOR_DOC,
592
+ output_type=TFBaseModelOutput,
593
+ config_class=_CONFIG_FOR_DOC,
594
+ )
595
+ def call(
596
+ self,
597
+ input_ids: TFModelInputType | None = None,
598
+ attention_mask: np.ndarray | tf.Tensor | None = None,
599
+ head_mask: np.ndarray | tf.Tensor | None = None,
600
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
601
+ output_attentions: Optional[bool] = None,
602
+ output_hidden_states: Optional[bool] = None,
603
+ return_dict: Optional[bool] = None,
604
+ training: Optional[bool] = False,
605
+ ) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]:
606
+ outputs = self.distilbert(
607
+ input_ids=input_ids,
608
+ attention_mask=attention_mask,
609
+ head_mask=head_mask,
610
+ inputs_embeds=inputs_embeds,
611
+ output_attentions=output_attentions,
612
+ output_hidden_states=output_hidden_states,
613
+ return_dict=return_dict,
614
+ training=training,
615
+ )
616
+ return outputs
617
+
618
+ def build(self, input_shape=None):
619
+ if self.built:
620
+ return
621
+ self.built = True
622
+ if getattr(self, "distilbert", None) is not None:
623
+ with tf.name_scope(self.distilbert.name):
624
+ self.distilbert.build(None)
625
+
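+ # Usage sketch for the bare encoder (assumes the public "distilbert-base-uncased"
+ # checkpoint; kept as a comment so nothing runs at import time):
+ #
+ #     from transformers import AutoTokenizer, TFDistilBertModel
+ #
+ #     tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased")
+ #     model = TFDistilBertModel.from_pretrained("distilbert-base-uncased")
+ #     inputs = tokenizer("Hello, my dog is cute", return_tensors="tf")
+ #     outputs = model(inputs)
+ #     outputs.last_hidden_state  # shape (batch_size, sequence_length, config.dim)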
626
+
627
+ class TFDistilBertLMHead(keras.layers.Layer):
628
+ def __init__(self, config, input_embeddings, **kwargs):
629
+ super().__init__(**kwargs)
630
+
631
+ self.config = config
632
+ self.dim = config.dim
633
+
634
+ # The output weights are the same as the input embeddings, but there is
635
+ # an output-only bias for each token.
636
+ self.input_embeddings = input_embeddings
637
+
638
+ def build(self, input_shape):
639
+ self.bias = self.add_weight(shape=(self.config.vocab_size,), initializer="zeros", trainable=True, name="bias")
640
+
641
+ super().build(input_shape)
642
+
643
+ def get_output_embeddings(self):
644
+ return self.input_embeddings
645
+
646
+ def set_output_embeddings(self, value):
647
+ self.input_embeddings.weight = value
648
+ self.input_embeddings.vocab_size = shape_list(value)[0]
649
+
650
+ def get_bias(self):
651
+ return {"bias": self.bias}
652
+
653
+ def set_bias(self, value):
654
+ self.bias = value["bias"]
655
+ self.config.vocab_size = shape_list(value["bias"])[0]
656
+
657
+ def call(self, hidden_states):
658
+ seq_length = shape_list(tensor=hidden_states)[1]
659
+ hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, self.dim])
660
+ hidden_states = tf.matmul(a=hidden_states, b=self.input_embeddings.weight, transpose_b=True)
661
+ hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, seq_length, self.config.vocab_size])
662
+ hidden_states = tf.nn.bias_add(value=hidden_states, bias=self.bias)
663
+
664
+ return hidden_states
665
+
666
+
667
+ @add_start_docstrings(
668
+ """DistilBert Model with a `masked language modeling` head on top.""",
669
+ DISTILBERT_START_DOCSTRING,
670
+ )
671
+ class TFDistilBertForMaskedLM(TFDistilBertPreTrainedModel, TFMaskedLanguageModelingLoss):
672
+ def __init__(self, config, *inputs, **kwargs):
673
+ super().__init__(config, *inputs, **kwargs)
674
+ self.config = config
675
+
676
+ self.distilbert = TFDistilBertMainLayer(config, name="distilbert")
677
+ self.vocab_transform = keras.layers.Dense(
678
+ config.dim, kernel_initializer=get_initializer(config.initializer_range), name="vocab_transform"
679
+ )
680
+ self.act = get_tf_activation(config.activation)
681
+ self.vocab_layer_norm = keras.layers.LayerNormalization(epsilon=1e-12, name="vocab_layer_norm")
682
+ self.vocab_projector = TFDistilBertLMHead(config, self.distilbert.embeddings, name="vocab_projector")
683
+
684
+ def get_lm_head(self):
685
+ return self.vocab_projector
686
+
687
+ def get_prefix_bias_name(self):
688
+ warnings.warn("The method get_prefix_bias_name is deprecated. Please use `get_bias` instead.", FutureWarning)
689
+ return self.name + "/" + self.vocab_projector.name
690
+
691
+ @unpack_inputs
692
+ @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
693
+ @add_code_sample_docstrings(
694
+ checkpoint=_CHECKPOINT_FOR_DOC,
695
+ output_type=TFMaskedLMOutput,
696
+ config_class=_CONFIG_FOR_DOC,
697
+ )
698
+ def call(
699
+ self,
700
+ input_ids: TFModelInputType | None = None,
701
+ attention_mask: np.ndarray | tf.Tensor | None = None,
702
+ head_mask: np.ndarray | tf.Tensor | None = None,
703
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
704
+ output_attentions: Optional[bool] = None,
705
+ output_hidden_states: Optional[bool] = None,
706
+ return_dict: Optional[bool] = None,
707
+ labels: np.ndarray | tf.Tensor | None = None,
708
+ training: Optional[bool] = False,
709
+ ) -> Union[TFMaskedLMOutput, Tuple[tf.Tensor]]:
710
+ r"""
711
+ labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
712
+ Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
713
+ config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
714
+ loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
715
+ """
716
+ distilbert_output = self.distilbert(
717
+ input_ids=input_ids,
718
+ attention_mask=attention_mask,
719
+ head_mask=head_mask,
720
+ inputs_embeds=inputs_embeds,
721
+ output_attentions=output_attentions,
722
+ output_hidden_states=output_hidden_states,
723
+ return_dict=return_dict,
724
+ training=training,
725
+ )
726
+ hidden_states = distilbert_output[0] # (bs, seq_length, dim)
727
+ prediction_logits = self.vocab_transform(hidden_states) # (bs, seq_length, dim)
728
+ prediction_logits = self.act(prediction_logits) # (bs, seq_length, dim)
729
+ prediction_logits = self.vocab_layer_norm(prediction_logits) # (bs, seq_length, dim)
730
+ prediction_logits = self.vocab_projector(prediction_logits)
731
+
732
+ loss = None if labels is None else self.hf_compute_loss(labels, prediction_logits)
733
+
734
+ if not return_dict:
735
+ output = (prediction_logits,) + distilbert_output[1:]
736
+ return ((loss,) + output) if loss is not None else output
737
+
738
+ return TFMaskedLMOutput(
739
+ loss=loss,
740
+ logits=prediction_logits,
741
+ hidden_states=distilbert_output.hidden_states,
742
+ attentions=distilbert_output.attentions,
743
+ )
744
+
745
+ def build(self, input_shape=None):
746
+ if self.built:
747
+ return
748
+ self.built = True
749
+ if getattr(self, "distilbert", None) is not None:
750
+ with tf.name_scope(self.distilbert.name):
751
+ self.distilbert.build(None)
752
+ if getattr(self, "vocab_transform", None) is not None:
753
+ with tf.name_scope(self.vocab_transform.name):
754
+ self.vocab_transform.build([None, None, self.config.dim])
755
+ if getattr(self, "vocab_layer_norm", None) is not None:
756
+ with tf.name_scope(self.vocab_layer_norm.name):
757
+ self.vocab_layer_norm.build([None, None, self.config.dim])
758
+ if getattr(self, "vocab_projector", None) is not None:
759
+ with tf.name_scope(self.vocab_projector.name):
760
+ self.vocab_projector.build(None)
761
+
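+ # Sketch of the labels convention documented in `call` above (illustrative ids only):
+ # positions set to -100 are ignored by the loss, every other position holds the id of
+ # the token the model should predict at the corresponding [MASK].
+ #
+ #     import tensorflow as tf
+ #     labels = tf.constant([[-100, -100, 2307, -100, -100]])  # loss on the third token only
+ #     outputs = mlm_model(input_ids, labels=labels)           # mlm_model: a TFDistilBertForMaskedLM
+ #     outputs.loss, outputs.logits                            # logits: (bs, seq_length, vocab_size)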
762
+
763
+ @add_start_docstrings(
764
+ """
765
+ DistilBert Model transformer with a sequence classification/regression head on top (a linear layer on top of the
766
+ pooled output) e.g. for GLUE tasks.
767
+ """,
768
+ DISTILBERT_START_DOCSTRING,
769
+ )
770
+ class TFDistilBertForSequenceClassification(TFDistilBertPreTrainedModel, TFSequenceClassificationLoss):
771
+ def __init__(self, config, *inputs, **kwargs):
772
+ super().__init__(config, *inputs, **kwargs)
773
+ self.num_labels = config.num_labels
774
+
775
+ self.distilbert = TFDistilBertMainLayer(config, name="distilbert")
776
+ self.pre_classifier = keras.layers.Dense(
777
+ config.dim,
778
+ kernel_initializer=get_initializer(config.initializer_range),
779
+ activation="relu",
780
+ name="pre_classifier",
781
+ )
782
+ self.classifier = keras.layers.Dense(
783
+ config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
784
+ )
785
+ self.dropout = keras.layers.Dropout(config.seq_classif_dropout)
786
+ self.config = config
787
+
788
+ @unpack_inputs
789
+ @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
790
+ @add_code_sample_docstrings(
791
+ checkpoint=_CHECKPOINT_FOR_DOC,
792
+ output_type=TFSequenceClassifierOutput,
793
+ config_class=_CONFIG_FOR_DOC,
794
+ )
795
+ def call(
796
+ self,
797
+ input_ids: TFModelInputType | None = None,
798
+ attention_mask: np.ndarray | tf.Tensor | None = None,
799
+ head_mask: np.ndarray | tf.Tensor | None = None,
800
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
801
+ output_attentions: Optional[bool] = None,
802
+ output_hidden_states: Optional[bool] = None,
803
+ return_dict: Optional[bool] = None,
804
+ labels: np.ndarray | tf.Tensor | None = None,
805
+ training: Optional[bool] = False,
806
+ ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
807
+ r"""
808
+ labels (`tf.Tensor` of shape `(batch_size,)`, *optional*):
809
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
810
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss). If
811
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
812
+ """
813
+ distilbert_output = self.distilbert(
814
+ input_ids=input_ids,
815
+ attention_mask=attention_mask,
816
+ head_mask=head_mask,
817
+ inputs_embeds=inputs_embeds,
818
+ output_attentions=output_attentions,
819
+ output_hidden_states=output_hidden_states,
820
+ return_dict=return_dict,
821
+ training=training,
822
+ )
823
+ hidden_state = distilbert_output[0] # (bs, seq_len, dim)
824
+ pooled_output = hidden_state[:, 0] # (bs, dim)
825
+ pooled_output = self.pre_classifier(pooled_output) # (bs, dim)
826
+ pooled_output = self.dropout(pooled_output, training=training) # (bs, dim)
827
+ logits = self.classifier(pooled_output) # (bs, dim)
828
+
829
+ loss = None if labels is None else self.hf_compute_loss(labels, logits)
830
+
831
+ if not return_dict:
832
+ output = (logits,) + distilbert_output[1:]
833
+ return ((loss,) + output) if loss is not None else output
834
+
835
+ return TFSequenceClassifierOutput(
836
+ loss=loss,
837
+ logits=logits,
838
+ hidden_states=distilbert_output.hidden_states,
839
+ attentions=distilbert_output.attentions,
840
+ )
841
+
842
+ def build(self, input_shape=None):
843
+ if self.built:
844
+ return
845
+ self.built = True
846
+ if getattr(self, "distilbert", None) is not None:
847
+ with tf.name_scope(self.distilbert.name):
848
+ self.distilbert.build(None)
849
+ if getattr(self, "pre_classifier", None) is not None:
850
+ with tf.name_scope(self.pre_classifier.name):
851
+ self.pre_classifier.build([None, None, self.config.dim])
852
+ if getattr(self, "classifier", None) is not None:
853
+ with tf.name_scope(self.classifier.name):
854
+ self.classifier.build([None, None, self.config.dim])
855
+
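+ # Sketch of the classification head above: the hidden state at the first ([CLS])
+ # position goes through pre_classifier (dim -> dim, ReLU), dropout, then classifier
+ # (dim -> num_labels). A hedged usage example:
+ #
+ #     model = TFDistilBertForSequenceClassification.from_pretrained(
+ #         "distilbert-base-uncased", num_labels=2
+ #     )
+ #     inputs = tokenizer("A great movie!", return_tensors="tf")  # tokenizer: any DistilBERT tokenizer
+ #     model(inputs).logits                                       # shape (batch_size, num_labels)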
856
+
857
+ @add_start_docstrings(
858
+ """
859
+ DistilBert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g.
860
+ for Named-Entity-Recognition (NER) tasks.
861
+ """,
862
+ DISTILBERT_START_DOCSTRING,
863
+ )
864
+ class TFDistilBertForTokenClassification(TFDistilBertPreTrainedModel, TFTokenClassificationLoss):
865
+ def __init__(self, config, *inputs, **kwargs):
866
+ super().__init__(config, *inputs, **kwargs)
867
+ self.num_labels = config.num_labels
868
+
869
+ self.distilbert = TFDistilBertMainLayer(config, name="distilbert")
870
+ self.dropout = keras.layers.Dropout(config.dropout)
871
+ self.classifier = keras.layers.Dense(
872
+ config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
873
+ )
874
+ self.config = config
875
+
876
+ @unpack_inputs
877
+ @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
878
+ @add_code_sample_docstrings(
879
+ checkpoint=_CHECKPOINT_FOR_DOC,
880
+ output_type=TFTokenClassifierOutput,
881
+ config_class=_CONFIG_FOR_DOC,
882
+ )
883
+ def call(
884
+ self,
885
+ input_ids: TFModelInputType | None = None,
886
+ attention_mask: np.ndarray | tf.Tensor | None = None,
887
+ head_mask: np.ndarray | tf.Tensor | None = None,
888
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
889
+ output_attentions: Optional[bool] = None,
890
+ output_hidden_states: Optional[bool] = None,
891
+ return_dict: Optional[bool] = None,
892
+ labels: np.ndarray | tf.Tensor | None = None,
893
+ training: Optional[bool] = False,
894
+ ) -> Union[TFTokenClassifierOutput, Tuple[tf.Tensor]]:
895
+ r"""
896
+ labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
897
+ Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
898
+ """
899
+ outputs = self.distilbert(
900
+ input_ids=input_ids,
901
+ attention_mask=attention_mask,
902
+ head_mask=head_mask,
903
+ inputs_embeds=inputs_embeds,
904
+ output_attentions=output_attentions,
905
+ output_hidden_states=output_hidden_states,
906
+ return_dict=return_dict,
907
+ training=training,
908
+ )
909
+ sequence_output = outputs[0]
910
+ sequence_output = self.dropout(sequence_output, training=training)
911
+ logits = self.classifier(sequence_output)
912
+ loss = None if labels is None else self.hf_compute_loss(labels, logits)
913
+
914
+ if not return_dict:
915
+ output = (logits,) + outputs[1:]
916
+ return ((loss,) + output) if loss is not None else output
917
+
918
+ return TFTokenClassifierOutput(
919
+ loss=loss,
920
+ logits=logits,
921
+ hidden_states=outputs.hidden_states,
922
+ attentions=outputs.attentions,
923
+ )
924
+
925
+ def build(self, input_shape=None):
926
+ if self.built:
927
+ return
928
+ self.built = True
929
+ if getattr(self, "distilbert", None) is not None:
930
+ with tf.name_scope(self.distilbert.name):
931
+ self.distilbert.build(None)
932
+ if getattr(self, "classifier", None) is not None:
933
+ with tf.name_scope(self.classifier.name):
934
+ self.classifier.build([None, None, self.config.hidden_size])
935
+
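+ # For token classification the head is applied per token, so `labels` must match the
+ # (batch_size, sequence_length) shape of `input_ids`. Hedged sketch:
+ #
+ #     logits = ner_model(**inputs).logits        # ner_model: a TFDistilBertForTokenClassification
+ #     predictions = tf.argmax(logits, axis=-1)   # one predicted label id per token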
936
+
937
+ @add_start_docstrings(
938
+ """
939
+ DistilBert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and
940
+ a softmax) e.g. for RocStories/SWAG tasks.
941
+ """,
942
+ DISTILBERT_START_DOCSTRING,
943
+ )
944
+ class TFDistilBertForMultipleChoice(TFDistilBertPreTrainedModel, TFMultipleChoiceLoss):
945
+ def __init__(self, config, *inputs, **kwargs):
946
+ super().__init__(config, *inputs, **kwargs)
947
+
948
+ self.distilbert = TFDistilBertMainLayer(config, name="distilbert")
949
+ self.dropout = keras.layers.Dropout(config.seq_classif_dropout)
950
+ self.pre_classifier = keras.layers.Dense(
951
+ config.dim,
952
+ kernel_initializer=get_initializer(config.initializer_range),
953
+ activation="relu",
954
+ name="pre_classifier",
955
+ )
956
+ self.classifier = keras.layers.Dense(
957
+ 1, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
958
+ )
959
+ self.config = config
960
+
961
+ @unpack_inputs
962
+ @add_start_docstrings_to_model_forward(
963
+ DISTILBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")
964
+ )
965
+ @add_code_sample_docstrings(
966
+ checkpoint=_CHECKPOINT_FOR_DOC,
967
+ output_type=TFMultipleChoiceModelOutput,
968
+ config_class=_CONFIG_FOR_DOC,
969
+ )
970
+ def call(
971
+ self,
972
+ input_ids: TFModelInputType | None = None,
973
+ attention_mask: np.ndarray | tf.Tensor | None = None,
974
+ head_mask: np.ndarray | tf.Tensor | None = None,
975
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
976
+ output_attentions: Optional[bool] = None,
977
+ output_hidden_states: Optional[bool] = None,
978
+ return_dict: Optional[bool] = None,
979
+ labels: np.ndarray | tf.Tensor | None = None,
980
+ training: Optional[bool] = False,
981
+ ) -> Union[TFMultipleChoiceModelOutput, Tuple[tf.Tensor]]:
982
+ r"""
983
+ labels (`tf.Tensor` of shape `(batch_size,)`, *optional*):
984
+ Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices]`
985
+ where `num_choices` is the size of the second dimension of the input tensors. (See `input_ids` above)
986
+ """
987
+ if input_ids is not None:
988
+ num_choices = shape_list(input_ids)[1]
989
+ seq_length = shape_list(input_ids)[2]
990
+ else:
991
+ num_choices = shape_list(inputs_embeds)[1]
992
+ seq_length = shape_list(inputs_embeds)[2]
993
+
994
+ flat_input_ids = tf.reshape(input_ids, (-1, seq_length)) if input_ids is not None else None
995
+ flat_attention_mask = tf.reshape(attention_mask, (-1, seq_length)) if attention_mask is not None else None
996
+ flat_inputs_embeds = (
997
+ tf.reshape(inputs_embeds, (-1, seq_length, shape_list(inputs_embeds)[3]))
998
+ if inputs_embeds is not None
999
+ else None
1000
+ )
1001
+ distilbert_output = self.distilbert(
1002
+ flat_input_ids,
1003
+ flat_attention_mask,
1004
+ head_mask,
1005
+ flat_inputs_embeds,
1006
+ output_attentions,
1007
+ output_hidden_states,
1008
+ return_dict=return_dict,
1009
+ training=training,
1010
+ )
1011
+ hidden_state = distilbert_output[0] # (bs, seq_len, dim)
1012
+ pooled_output = hidden_state[:, 0] # (bs, dim)
1013
+ pooled_output = self.pre_classifier(pooled_output) # (bs, dim)
1014
+ pooled_output = self.dropout(pooled_output, training=training) # (bs, dim)
1015
+ logits = self.classifier(pooled_output)
1016
+ reshaped_logits = tf.reshape(logits, (-1, num_choices))
1017
+
1018
+ loss = None if labels is None else self.hf_compute_loss(labels, reshaped_logits)
1019
+
1020
+ if not return_dict:
1021
+ output = (reshaped_logits,) + distilbert_output[1:]
1022
+ return ((loss,) + output) if loss is not None else output
1023
+
1024
+ return TFMultipleChoiceModelOutput(
1025
+ loss=loss,
1026
+ logits=reshaped_logits,
1027
+ hidden_states=distilbert_output.hidden_states,
1028
+ attentions=distilbert_output.attentions,
1029
+ )
1030
+
1031
+ def build(self, input_shape=None):
1032
+ if self.built:
1033
+ return
1034
+ self.built = True
1035
+ if getattr(self, "distilbert", None) is not None:
1036
+ with tf.name_scope(self.distilbert.name):
1037
+ self.distilbert.build(None)
1038
+ if getattr(self, "pre_classifier", None) is not None:
1039
+ with tf.name_scope(self.pre_classifier.name):
1040
+ self.pre_classifier.build([None, None, self.config.dim])
1041
+ if getattr(self, "classifier", None) is not None:
1042
+ with tf.name_scope(self.classifier.name):
1043
+ self.classifier.build([None, None, self.config.dim])
1044
+
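+ # The multiple-choice head expects inputs of shape (batch_size, num_choices, seq_length).
+ # As the `call` above shows, they are flattened to (batch_size * num_choices, seq_length),
+ # encoded once, scored with a 1-unit classifier, and reshaped back. Worked shapes
+ # (made-up sizes, for illustration only):
+ #
+ #     input_ids: (2, 4, 16) -> flat_input_ids: (8, 16)
+ #     classifier output: (8, 1) -> reshaped_logits: (2, 4)   # one score per choice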
1045
+
1046
+ @add_start_docstrings(
1047
+ """
1048
+ DistilBert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a
1049
+ linear layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
1050
+ """,
1051
+ DISTILBERT_START_DOCSTRING,
1052
+ )
1053
+ class TFDistilBertForQuestionAnswering(TFDistilBertPreTrainedModel, TFQuestionAnsweringLoss):
1054
+ def __init__(self, config, *inputs, **kwargs):
1055
+ super().__init__(config, *inputs, **kwargs)
1056
+
1057
+ self.distilbert = TFDistilBertMainLayer(config, name="distilbert")
1058
+ self.qa_outputs = keras.layers.Dense(
1059
+ config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="qa_outputs"
1060
+ )
1061
+ assert config.num_labels == 2, f"Incorrect number of labels {config.num_labels} instead of 2"
1062
+ self.dropout = keras.layers.Dropout(config.qa_dropout)
1063
+ self.config = config
1064
+
1065
+ @unpack_inputs
1066
+ @add_start_docstrings_to_model_forward(DISTILBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1067
+ @add_code_sample_docstrings(
1068
+ checkpoint=_CHECKPOINT_FOR_DOC,
1069
+ output_type=TFQuestionAnsweringModelOutput,
1070
+ config_class=_CONFIG_FOR_DOC,
1071
+ )
1072
+ def call(
1073
+ self,
1074
+ input_ids: TFModelInputType | None = None,
1075
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1076
+ head_mask: np.ndarray | tf.Tensor | None = None,
1077
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1078
+ output_attentions: Optional[bool] = None,
1079
+ output_hidden_states: Optional[bool] = None,
1080
+ return_dict: Optional[bool] = None,
1081
+ start_positions: np.ndarray | tf.Tensor | None = None,
1082
+ end_positions: np.ndarray | tf.Tensor | None = None,
1083
+ training: Optional[bool] = False,
1084
+ ) -> Union[TFQuestionAnsweringModelOutput, Tuple[tf.Tensor]]:
1085
+ r"""
1086
+ start_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*):
1087
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
1088
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1089
+ are not taken into account for computing the loss.
1090
+ end_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*):
1091
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
1092
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1093
+ are not taken into account for computing the loss.
1094
+ """
1095
+ distilbert_output = self.distilbert(
1096
+ input_ids=input_ids,
1097
+ attention_mask=attention_mask,
1098
+ head_mask=head_mask,
1099
+ inputs_embeds=inputs_embeds,
1100
+ output_attentions=output_attentions,
1101
+ output_hidden_states=output_hidden_states,
1102
+ return_dict=return_dict,
1103
+ training=training,
1104
+ )
1105
+ hidden_states = distilbert_output[0] # (bs, max_query_len, dim)
1106
+ hidden_states = self.dropout(hidden_states, training=training) # (bs, max_query_len, dim)
1107
+ logits = self.qa_outputs(hidden_states) # (bs, max_query_len, 2)
1108
+ start_logits, end_logits = tf.split(logits, 2, axis=-1)
1109
+ start_logits = tf.squeeze(start_logits, axis=-1)
1110
+ end_logits = tf.squeeze(end_logits, axis=-1)
1111
+
1112
+ loss = None
1113
+ if start_positions is not None and end_positions is not None:
1114
+ labels = {"start_position": start_positions}
1115
+ labels["end_position"] = end_positions
1116
+ loss = self.hf_compute_loss(labels, (start_logits, end_logits))
1117
+
1118
+ if not return_dict:
1119
+ output = (start_logits, end_logits) + distilbert_output[1:]
1120
+ return ((loss,) + output) if loss is not None else output
1121
+
1122
+ return TFQuestionAnsweringModelOutput(
1123
+ loss=loss,
1124
+ start_logits=start_logits,
1125
+ end_logits=end_logits,
1126
+ hidden_states=distilbert_output.hidden_states,
1127
+ attentions=distilbert_output.attentions,
1128
+ )
1129
+
1130
+ def build(self, input_shape=None):
1131
+ if self.built:
1132
+ return
1133
+ self.built = True
1134
+ if getattr(self, "distilbert", None) is not None:
1135
+ with tf.name_scope(self.distilbert.name):
1136
+ self.distilbert.build(None)
1137
+ if getattr(self, "qa_outputs", None) is not None:
1138
+ with tf.name_scope(self.qa_outputs.name):
1139
+ self.qa_outputs.build([None, None, self.config.dim])
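+ # Sketch of the span-extraction head above: qa_outputs maps each token's hidden state to
+ # two scores that are split into start/end logits. A greedy answer span could then be
+ # recovered as (illustrative only):
+ #
+ #     start_index = int(tf.argmax(outputs.start_logits, axis=-1)[0])
+ #     end_index = int(tf.argmax(outputs.end_logits, axis=-1)[0])
+ #     answer_ids = inputs["input_ids"][0, start_index : end_index + 1]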
llmeval-env/lib/python3.10/site-packages/transformers/models/distilbert/tokenization_distilbert.py ADDED
@@ -0,0 +1,514 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Tokenization classes for DistilBERT."""
16
+
17
+ import collections
18
+ import os
19
+ import unicodedata
20
+ from typing import List, Optional, Tuple
21
+
22
+ from ...tokenization_utils import PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace
23
+ from ...utils import logging
24
+
25
+
26
+ logger = logging.get_logger(__name__)
27
+
28
+ VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}
29
+
30
+
31
+ # Copied from transformers.models.bert.tokenization_bert.load_vocab
32
+ def load_vocab(vocab_file):
33
+ """Loads a vocabulary file into a dictionary."""
34
+ vocab = collections.OrderedDict()
35
+ with open(vocab_file, "r", encoding="utf-8") as reader:
36
+ tokens = reader.readlines()
37
+ for index, token in enumerate(tokens):
38
+ token = token.rstrip("\n")
39
+ vocab[token] = index
40
+ return vocab
41
+
42
+
43
+ # Copied from transformers.models.bert.tokenization_bert.whitespace_tokenize
44
+ def whitespace_tokenize(text):
45
+ """Runs basic whitespace cleaning and splitting on a piece of text."""
46
+ text = text.strip()
47
+ if not text:
48
+ return []
49
+ tokens = text.split()
50
+ return tokens
51
+
52
+
53
+ class DistilBertTokenizer(PreTrainedTokenizer):
54
+ r"""
55
+ Construct a DistilBERT tokenizer. Based on WordPiece.
56
+
57
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
58
+ this superclass for more information regarding those methods.
59
+
60
+ Args:
61
+ vocab_file (`str`):
62
+ File containing the vocabulary.
63
+ do_lower_case (`bool`, *optional*, defaults to `True`):
64
+ Whether or not to lowercase the input when tokenizing.
65
+ do_basic_tokenize (`bool`, *optional*, defaults to `True`):
66
+ Whether or not to do basic tokenization before WordPiece.
67
+ never_split (`Iterable`, *optional*):
68
+ Collection of tokens which will never be split during tokenization. Only has an effect when
69
+ `do_basic_tokenize=True`
70
+ unk_token (`str`, *optional*, defaults to `"[UNK]"`):
71
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
72
+ token instead.
73
+ sep_token (`str`, *optional*, defaults to `"[SEP]"`):
74
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
75
+ sequence classification or for a text and a question for question answering. It is also used as the last
76
+ token of a sequence built with special tokens.
77
+ pad_token (`str`, *optional*, defaults to `"[PAD]"`):
78
+ The token used for padding, for example when batching sequences of different lengths.
79
+ cls_token (`str`, *optional*, defaults to `"[CLS]"`):
80
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
81
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
82
+ mask_token (`str`, *optional*, defaults to `"[MASK]"`):
83
+ The token used for masking values. This is the token used when training this model with masked language
84
+ modeling. This is the token which the model will try to predict.
85
+ tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
86
+ Whether or not to tokenize Chinese characters.
87
+
88
+ This should likely be deactivated for Japanese (see this
89
+ [issue](https://github.com/huggingface/transformers/issues/328)).
90
+ strip_accents (`bool`, *optional*):
91
+ Whether or not to strip all accents. If this option is not specified, then it will be determined by the
92
+ value for `lowercase` (as in the original BERT).
93
+ """
94
+
95
+ vocab_files_names = VOCAB_FILES_NAMES
96
+ model_input_names = ["input_ids", "attention_mask"]
97
+
98
+ def __init__(
99
+ self,
100
+ vocab_file,
101
+ do_lower_case=True,
102
+ do_basic_tokenize=True,
103
+ never_split=None,
104
+ unk_token="[UNK]",
105
+ sep_token="[SEP]",
106
+ pad_token="[PAD]",
107
+ cls_token="[CLS]",
108
+ mask_token="[MASK]",
109
+ tokenize_chinese_chars=True,
110
+ strip_accents=None,
111
+ **kwargs,
112
+ ):
113
+ if not os.path.isfile(vocab_file):
114
+ raise ValueError(
115
+ f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
116
+ " model use `tokenizer = DistilBertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
117
+ )
118
+ self.vocab = load_vocab(vocab_file)
119
+ self.ids_to_tokens = collections.OrderedDict([(ids, tok) for tok, ids in self.vocab.items()])
120
+ self.do_basic_tokenize = do_basic_tokenize
121
+ if do_basic_tokenize:
122
+ self.basic_tokenizer = BasicTokenizer(
123
+ do_lower_case=do_lower_case,
124
+ never_split=never_split,
125
+ tokenize_chinese_chars=tokenize_chinese_chars,
126
+ strip_accents=strip_accents,
127
+ )
128
+ self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=str(unk_token))
129
+
130
+ super().__init__(
131
+ do_lower_case=do_lower_case,
132
+ do_basic_tokenize=do_basic_tokenize,
133
+ never_split=never_split,
134
+ unk_token=unk_token,
135
+ sep_token=sep_token,
136
+ pad_token=pad_token,
137
+ cls_token=cls_token,
138
+ mask_token=mask_token,
139
+ tokenize_chinese_chars=tokenize_chinese_chars,
140
+ strip_accents=strip_accents,
141
+ **kwargs,
142
+ )
143
+
144
+ @property
145
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.do_lower_case
146
+ def do_lower_case(self):
147
+ return self.basic_tokenizer.do_lower_case
148
+
149
+ @property
150
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.vocab_size
151
+ def vocab_size(self):
152
+ return len(self.vocab)
153
+
154
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.get_vocab
155
+ def get_vocab(self):
156
+ return dict(self.vocab, **self.added_tokens_encoder)
157
+
158
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer._tokenize
159
+ def _tokenize(self, text, split_special_tokens=False):
160
+ split_tokens = []
161
+ if self.do_basic_tokenize:
162
+ for token in self.basic_tokenizer.tokenize(
163
+ text, never_split=self.all_special_tokens if not split_special_tokens else None
164
+ ):
165
+ # If the token is part of the never_split set
166
+ if token in self.basic_tokenizer.never_split:
167
+ split_tokens.append(token)
168
+ else:
169
+ split_tokens += self.wordpiece_tokenizer.tokenize(token)
170
+ else:
171
+ split_tokens = self.wordpiece_tokenizer.tokenize(text)
172
+ return split_tokens
173
+
174
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer._convert_token_to_id
175
+ def _convert_token_to_id(self, token):
176
+ """Converts a token (str) to an id using the vocab."""
177
+ return self.vocab.get(token, self.vocab.get(self.unk_token))
178
+
179
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer._convert_id_to_token
180
+ def _convert_id_to_token(self, index):
181
+ """Converts an index (integer) to a token (str) using the vocab."""
182
+ return self.ids_to_tokens.get(index, self.unk_token)
183
+
184
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.convert_tokens_to_string
185
+ def convert_tokens_to_string(self, tokens):
186
+ """Converts a sequence of tokens (string) into a single string."""
187
+ out_string = " ".join(tokens).replace(" ##", "").strip()
188
+ return out_string
189
+
190
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.build_inputs_with_special_tokens
191
+ def build_inputs_with_special_tokens(
192
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
193
+ ) -> List[int]:
194
+ """
195
+ Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
196
+ adding special tokens. A BERT sequence has the following format:
197
+
198
+ - single sequence: `[CLS] X [SEP]`
199
+ - pair of sequences: `[CLS] A [SEP] B [SEP]`
200
+
201
+ Args:
202
+ token_ids_0 (`List[int]`):
203
+ List of IDs to which the special tokens will be added.
204
+ token_ids_1 (`List[int]`, *optional*):
205
+ Optional second list of IDs for sequence pairs.
206
+
207
+ Returns:
208
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
209
+ """
210
+ if token_ids_1 is None:
211
+ return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
212
+ cls = [self.cls_token_id]
213
+ sep = [self.sep_token_id]
214
+ return cls + token_ids_0 + sep + token_ids_1 + sep
215
+
216
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.get_special_tokens_mask
217
+ def get_special_tokens_mask(
218
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
219
+ ) -> List[int]:
220
+ """
221
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
222
+ special tokens using the tokenizer `prepare_for_model` method.
223
+
224
+ Args:
225
+ token_ids_0 (`List[int]`):
226
+ List of IDs.
227
+ token_ids_1 (`List[int]`, *optional*):
228
+ Optional second list of IDs for sequence pairs.
229
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
230
+ Whether or not the token list is already formatted with special tokens for the model.
231
+
232
+ Returns:
233
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
234
+ """
235
+
236
+ if already_has_special_tokens:
237
+ return super().get_special_tokens_mask(
238
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
239
+ )
240
+
241
+ if token_ids_1 is not None:
242
+ return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
243
+ return [1] + ([0] * len(token_ids_0)) + [1]
244
+
245
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.create_token_type_ids_from_sequences
246
+ def create_token_type_ids_from_sequences(
247
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
248
+ ) -> List[int]:
249
+ """
250
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. A BERT sequence
251
+ pair mask has the following format:
252
+
253
+ ```
254
+ 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
255
+ | first sequence | second sequence |
256
+ ```
257
+
258
+ If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).
259
+
260
+ Args:
261
+ token_ids_0 (`List[int]`):
262
+ List of IDs.
263
+ token_ids_1 (`List[int]`, *optional*):
264
+ Optional second list of IDs for sequence pairs.
265
+
266
+ Returns:
267
+ `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
268
+ """
269
+ sep = [self.sep_token_id]
270
+ cls = [self.cls_token_id]
271
+ if token_ids_1 is None:
272
+ return len(cls + token_ids_0 + sep) * [0]
273
+ return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
274
+
275
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.save_vocabulary
276
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
277
+ index = 0
278
+ if os.path.isdir(save_directory):
279
+ vocab_file = os.path.join(
280
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
281
+ )
282
+ else:
283
+ vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
284
+ with open(vocab_file, "w", encoding="utf-8") as writer:
285
+ for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
286
+ if index != token_index:
287
+ logger.warning(
288
+ f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
289
+ " Please check that the vocabulary is not corrupted!"
290
+ )
291
+ index = token_index
292
+ writer.write(token + "\n")
293
+ index += 1
294
+ return (vocab_file,)
295
+
296
+
297
+ # Copied from transformers.models.bert.tokenization_bert.BasicTokenizer
298
+ class BasicTokenizer(object):
299
+ """
300
+ Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.).
301
+
302
+ Args:
303
+ do_lower_case (`bool`, *optional*, defaults to `True`):
304
+ Whether or not to lowercase the input when tokenizing.
305
+ never_split (`Iterable`, *optional*):
306
+ Collection of tokens which will never be split during tokenization. Only has an effect when
307
+ `do_basic_tokenize=True`
308
+ tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
309
+ Whether or not to tokenize Chinese characters.
310
+
311
+ This should likely be deactivated for Japanese (see this
312
+ [issue](https://github.com/huggingface/transformers/issues/328)).
313
+ strip_accents (`bool`, *optional*):
314
+ Whether or not to strip all accents. If this option is not specified, then it will be determined by the
315
+ value for `lowercase` (as in the original BERT).
316
+ do_split_on_punc (`bool`, *optional*, defaults to `True`):
317
+ In some instances we want to skip the basic punctuation splitting so that later tokenization can capture
318
+ the full context of the words, such as contractions.
319
+ """
320
+
321
+ def __init__(
322
+ self,
323
+ do_lower_case=True,
324
+ never_split=None,
325
+ tokenize_chinese_chars=True,
326
+ strip_accents=None,
327
+ do_split_on_punc=True,
328
+ ):
329
+ if never_split is None:
330
+ never_split = []
331
+ self.do_lower_case = do_lower_case
332
+ self.never_split = set(never_split)
333
+ self.tokenize_chinese_chars = tokenize_chinese_chars
334
+ self.strip_accents = strip_accents
335
+ self.do_split_on_punc = do_split_on_punc
336
+
337
+ def tokenize(self, text, never_split=None):
338
+ """
339
+ Basic Tokenization of a piece of text. For sub-word tokenization, see WordPieceTokenizer.
340
+
341
+ Args:
342
+ never_split (`List[str]`, *optional*)
343
+ Kept for backward compatibility purposes. Now implemented directly at the base class level (see
344
+ [`PreTrainedTokenizer.tokenize`]) List of tokens not to split.
345
+ """
346
+ # union() returns a new set containing the elements of both sets.
347
+ never_split = self.never_split.union(set(never_split)) if never_split else self.never_split
348
+ text = self._clean_text(text)
349
+
350
+ # This was added on November 1st, 2018 for the multilingual and Chinese
351
+ # models. This is also applied to the English models now, but it doesn't
352
+ # matter since the English models were not trained on any Chinese data
353
+ # and generally don't have any Chinese data in them (there are Chinese
354
+ # characters in the vocabulary because Wikipedia does have some Chinese
355
+ # words in the English Wikipedia.).
356
+ if self.tokenize_chinese_chars:
357
+ text = self._tokenize_chinese_chars(text)
358
+ # prevents treating the same character with different unicode codepoints as different characters
359
+ unicode_normalized_text = unicodedata.normalize("NFC", text)
360
+ orig_tokens = whitespace_tokenize(unicode_normalized_text)
361
+ split_tokens = []
362
+ for token in orig_tokens:
363
+ if token not in never_split:
364
+ if self.do_lower_case:
365
+ token = token.lower()
366
+ if self.strip_accents is not False:
367
+ token = self._run_strip_accents(token)
368
+ elif self.strip_accents:
369
+ token = self._run_strip_accents(token)
370
+ split_tokens.extend(self._run_split_on_punc(token, never_split))
371
+
372
+ output_tokens = whitespace_tokenize(" ".join(split_tokens))
373
+ return output_tokens
374
+
375
+ def _run_strip_accents(self, text):
376
+ """Strips accents from a piece of text."""
377
+ text = unicodedata.normalize("NFD", text)
378
+ output = []
379
+ for char in text:
380
+ cat = unicodedata.category(char)
381
+ if cat == "Mn":
382
+ continue
383
+ output.append(char)
384
+ return "".join(output)
385
+
386
+ def _run_split_on_punc(self, text, never_split=None):
387
+ """Splits punctuation on a piece of text."""
388
+ if not self.do_split_on_punc or (never_split is not None and text in never_split):
389
+ return [text]
390
+ chars = list(text)
391
+ i = 0
392
+ start_new_word = True
393
+ output = []
394
+ while i < len(chars):
395
+ char = chars[i]
396
+ if _is_punctuation(char):
397
+ output.append([char])
398
+ start_new_word = True
399
+ else:
400
+ if start_new_word:
401
+ output.append([])
402
+ start_new_word = False
403
+ output[-1].append(char)
404
+ i += 1
405
+
406
+ return ["".join(x) for x in output]
407
+
408
+ def _tokenize_chinese_chars(self, text):
409
+ """Adds whitespace around any CJK character."""
410
+ output = []
411
+ for char in text:
412
+ cp = ord(char)
413
+ if self._is_chinese_char(cp):
414
+ output.append(" ")
415
+ output.append(char)
416
+ output.append(" ")
417
+ else:
418
+ output.append(char)
419
+ return "".join(output)
420
+
421
+ def _is_chinese_char(self, cp):
422
+ """Checks whether CP is the codepoint of a CJK character."""
423
+ # This defines a "chinese character" as anything in the CJK Unicode block:
424
+ # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
425
+ #
426
+ # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
427
+ # despite its name. The modern Korean Hangul alphabet is a different block,
428
+ # as is Japanese Hiragana and Katakana. Those alphabets are used to write
429
+ # space-separated words, so they are not treated specially and handled
430
+ # like all of the other languages.
431
+ if (
432
+ (cp >= 0x4E00 and cp <= 0x9FFF)
433
+ or (cp >= 0x3400 and cp <= 0x4DBF) #
434
+ or (cp >= 0x20000 and cp <= 0x2A6DF) #
435
+ or (cp >= 0x2A700 and cp <= 0x2B73F) #
436
+ or (cp >= 0x2B740 and cp <= 0x2B81F) #
437
+ or (cp >= 0x2B820 and cp <= 0x2CEAF) #
438
+ or (cp >= 0xF900 and cp <= 0xFAFF)
439
+ or (cp >= 0x2F800 and cp <= 0x2FA1F) #
440
+ ): #
441
+ return True
442
+
443
+ return False
444
+
445
+ def _clean_text(self, text):
446
+ """Performs invalid character removal and whitespace cleanup on text."""
447
+ output = []
448
+ for char in text:
449
+ cp = ord(char)
450
+ if cp == 0 or cp == 0xFFFD or _is_control(char):
451
+ continue
452
+ if _is_whitespace(char):
453
+ output.append(" ")
454
+ else:
455
+ output.append(char)
456
+ return "".join(output)
457
+
458
+
459
+ # Copied from transformers.models.bert.tokenization_bert.WordpieceTokenizer
460
+ class WordpieceTokenizer(object):
461
+ """Runs WordPiece tokenization."""
462
+
463
+ def __init__(self, vocab, unk_token, max_input_chars_per_word=100):
464
+ self.vocab = vocab
465
+ self.unk_token = unk_token
466
+ self.max_input_chars_per_word = max_input_chars_per_word
467
+
468
+ def tokenize(self, text):
469
+ """
470
+ Tokenizes a piece of text into its word pieces. This uses a greedy longest-match-first algorithm to perform
471
+ tokenization using the given vocabulary.
472
+
473
+ For example, `input = "unaffable"` will return as output `["un", "##aff", "##able"]`.
474
+
475
+ Args:
476
+ text: A single token or whitespace separated tokens. This should have
477
+ already been passed through *BasicTokenizer*.
478
+
479
+ Returns:
480
+ A list of wordpiece tokens.
481
+ """
482
+
483
+ output_tokens = []
484
+ for token in whitespace_tokenize(text):
485
+ chars = list(token)
486
+ if len(chars) > self.max_input_chars_per_word:
487
+ output_tokens.append(self.unk_token)
488
+ continue
489
+
490
+ is_bad = False
491
+ start = 0
492
+ sub_tokens = []
493
+ while start < len(chars):
494
+ end = len(chars)
495
+ cur_substr = None
496
+ while start < end:
497
+ substr = "".join(chars[start:end])
498
+ if start > 0:
499
+ substr = "##" + substr
500
+ if substr in self.vocab:
501
+ cur_substr = substr
502
+ break
503
+ end -= 1
504
+ if cur_substr is None:
505
+ is_bad = True
506
+ break
507
+ sub_tokens.append(cur_substr)
508
+ start = end
509
+
510
+ if is_bad:
511
+ output_tokens.append(self.unk_token)
512
+ else:
513
+ output_tokens.extend(sub_tokens)
514
+ return output_tokens
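+ # Hedged end-to-end sketch of the slow tokenizer defined above (assumes the public
+ # "distilbert-base-uncased" vocabulary; exact word pieces depend on the vocab file):
+ #
+ #     tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")
+ #     tokenizer.tokenize("unaffable")                          # greedy longest-match pieces, e.g. ["un", "##aff", "##able"]
+ #     tokenizer("Hello world", "How are you?")["input_ids"]    # [CLS] A [SEP] B [SEP] ids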
llmeval-env/lib/python3.10/site-packages/transformers/models/distilbert/tokenization_distilbert_fast.py ADDED
@@ -0,0 +1,176 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Tokenization classes for DistilBERT."""
16
+
17
+ import json
18
+ from typing import List, Optional, Tuple
19
+
20
+ from tokenizers import normalizers
21
+
22
+ from ...tokenization_utils_fast import PreTrainedTokenizerFast
23
+ from ...utils import logging
24
+ from .tokenization_distilbert import DistilBertTokenizer
25
+
26
+
27
+ logger = logging.get_logger(__name__)
28
+
29
+ VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
30
+
31
+
32
+ class DistilBertTokenizerFast(PreTrainedTokenizerFast):
33
+ r"""
34
+ Construct a "fast" DistilBERT tokenizer (backed by HuggingFace's *tokenizers* library). Based on WordPiece.
35
+
36
+ This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
37
+ refer to this superclass for more information regarding those methods.
38
+
39
+ Args:
40
+ vocab_file (`str`):
41
+ File containing the vocabulary.
42
+ do_lower_case (`bool`, *optional*, defaults to `True`):
43
+ Whether or not to lowercase the input when tokenizing.
44
+ unk_token (`str`, *optional*, defaults to `"[UNK]"`):
45
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
46
+ token instead.
47
+ sep_token (`str`, *optional*, defaults to `"[SEP]"`):
48
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
49
+ sequence classification or for a text and a question for question answering. It is also used as the last
50
+ token of a sequence built with special tokens.
51
+ pad_token (`str`, *optional*, defaults to `"[PAD]"`):
52
+ The token used for padding, for example when batching sequences of different lengths.
53
+ cls_token (`str`, *optional*, defaults to `"[CLS]"`):
54
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
55
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
56
+ mask_token (`str`, *optional*, defaults to `"[MASK]"`):
57
+ The token used for masking values. This is the token used when training this model with masked language
58
+ modeling. This is the token which the model will try to predict.
59
+ clean_text (`bool`, *optional*, defaults to `True`):
60
+ Whether or not to clean the text before tokenization by removing any control characters and replacing all
61
+ whitespace characters with the classic space character.
62
+ tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
63
+ Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see [this
64
+ issue](https://github.com/huggingface/transformers/issues/328)).
65
+ strip_accents (`bool`, *optional*):
66
+ Whether or not to strip all accents. If this option is not specified, then it will be determined by the
67
+ value for `lowercase` (as in the original BERT).
68
+ wordpieces_prefix (`str`, *optional*, defaults to `"##"`):
69
+ The prefix for subwords.
70
+ """
71
+
72
+ vocab_files_names = VOCAB_FILES_NAMES
73
+ model_input_names = ["input_ids", "attention_mask"]
74
+ slow_tokenizer_class = DistilBertTokenizer
75
+
76
+ def __init__(
77
+ self,
78
+ vocab_file=None,
79
+ tokenizer_file=None,
80
+ do_lower_case=True,
81
+ unk_token="[UNK]",
82
+ sep_token="[SEP]",
83
+ pad_token="[PAD]",
84
+ cls_token="[CLS]",
85
+ mask_token="[MASK]",
86
+ tokenize_chinese_chars=True,
87
+ strip_accents=None,
88
+ **kwargs,
89
+ ):
90
+ super().__init__(
91
+ vocab_file,
92
+ tokenizer_file=tokenizer_file,
93
+ do_lower_case=do_lower_case,
94
+ unk_token=unk_token,
95
+ sep_token=sep_token,
96
+ pad_token=pad_token,
97
+ cls_token=cls_token,
98
+ mask_token=mask_token,
99
+ tokenize_chinese_chars=tokenize_chinese_chars,
100
+ strip_accents=strip_accents,
101
+ **kwargs,
102
+ )
103
+
104
+ normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
105
+ if (
106
+ normalizer_state.get("lowercase", do_lower_case) != do_lower_case
107
+ or normalizer_state.get("strip_accents", strip_accents) != strip_accents
108
+ or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
109
+ ):
110
+ normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
111
+ normalizer_state["lowercase"] = do_lower_case
112
+ normalizer_state["strip_accents"] = strip_accents
113
+ normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
114
+ self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
115
+
116
+ self.do_lower_case = do_lower_case
117
+
118
+ # Copied from transformers.models.bert.tokenization_bert_fast.BertTokenizerFast.build_inputs_with_special_tokens
119
+ def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
120
+ """
121
+ Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
122
+ adding special tokens. A BERT sequence has the following format:
123
+
124
+ - single sequence: `[CLS] X [SEP]`
125
+ - pair of sequences: `[CLS] A [SEP] B [SEP]`
126
+
127
+ Args:
128
+ token_ids_0 (`List[int]`):
129
+ List of IDs to which the special tokens will be added.
130
+ token_ids_1 (`List[int]`, *optional*):
131
+ Optional second list of IDs for sequence pairs.
132
+
133
+ Returns:
134
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
135
+ """
136
+ output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
137
+
138
+ if token_ids_1 is not None:
139
+ output += token_ids_1 + [self.sep_token_id]
140
+
141
+ return output
142
+
143
+ # Copied from transformers.models.bert.tokenization_bert_fast.BertTokenizerFast.create_token_type_ids_from_sequences
144
+ def create_token_type_ids_from_sequences(
145
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
146
+ ) -> List[int]:
147
+ """
148
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. A BERT sequence
149
+ pair mask has the following format:
150
+
151
+ ```
152
+ 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
153
+ | first sequence | second sequence |
154
+ ```
155
+
156
+ If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).
157
+
158
+ Args:
159
+ token_ids_0 (`List[int]`):
160
+ List of IDs.
161
+ token_ids_1 (`List[int]`, *optional*):
162
+ Optional second list of IDs for sequence pairs.
163
+
164
+ Returns:
165
+ `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
166
+ """
167
+ sep = [self.sep_token_id]
168
+ cls = [self.cls_token_id]
169
+ if token_ids_1 is None:
170
+ return len(cls + token_ids_0 + sep) * [0]
171
+ return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
172
+
173
+ # Copied from transformers.models.bert.tokenization_bert_fast.BertTokenizerFast.save_vocabulary
174
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
175
+ files = self._tokenizer.model.save(save_directory, name=filename_prefix)
176
+ return tuple(files)
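
A minimal standalone sketch of the BERT-style packing implemented by `build_inputs_with_special_tokens` and `create_token_type_ids_from_sequences` above, so the expected outputs can be checked without loading a tokenizer. The ids 101 and 102 are assumed placeholder values for `[CLS]` and `[SEP]`, not taken from any real vocabulary.

```python
# Standalone sketch of the packing logic shown in the diff above.
# CLS_ID / SEP_ID are assumed placeholder ids, not values from an actual vocab file.
CLS_ID, SEP_ID = 101, 102

def pack_with_special_tokens(ids_0, ids_1=None):
    # single sequence: [CLS] X [SEP]; pair: [CLS] A [SEP] B [SEP]
    out = [CLS_ID] + ids_0 + [SEP_ID]
    if ids_1 is not None:
        out += ids_1 + [SEP_ID]
    return out

def token_type_ids(ids_0, ids_1=None):
    # 0s cover "[CLS] A [SEP]", 1s cover "B [SEP]" when a pair is given
    first = [0] * len([CLS_ID] + ids_0 + [SEP_ID])
    if ids_1 is None:
        return first
    return first + [1] * len(ids_1 + [SEP_ID])

a, b = [7, 8, 9], [11, 12]
print(pack_with_special_tokens(a, b))  # [101, 7, 8, 9, 102, 11, 12, 102]
print(token_type_ids(a, b))            # [0, 0, 0, 0, 0, 1, 1, 1]
```
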
llmeval-env/lib/python3.10/site-packages/transformers/models/dpr/__pycache__/convert_dpr_original_checkpoint_to_pytorch.cpython-310.pyc ADDED
Binary file (4.95 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/dpr/__pycache__/modeling_dpr.cpython-310.pyc ADDED
Binary file (22.1 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/dpr/__pycache__/modeling_tf_dpr.cpython-310.pyc ADDED
Binary file (27.3 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/dpr/__pycache__/tokenization_dpr.cpython-310.pyc ADDED
Binary file (13.2 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/dpr/__pycache__/tokenization_dpr_fast.cpython-310.pyc ADDED
Binary file (13.6 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/focalnet/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.04 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/focalnet/__pycache__/configuration_focalnet.cpython-310.pyc ADDED
Binary file (7.26 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/focalnet/__pycache__/convert_focalnet_to_hf_format.cpython-310.pyc ADDED
Binary file (7.07 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/focalnet/__pycache__/modeling_focalnet.cpython-310.pyc ADDED
Binary file (32.6 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/funnel/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (2.05 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/funnel/__pycache__/configuration_funnel.cpython-310.pyc ADDED
Binary file (6.89 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/funnel/__pycache__/convert_funnel_original_tf_checkpoint_to_pytorch.cpython-310.pyc ADDED
Binary file (1.61 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/funnel/__pycache__/modeling_funnel.cpython-310.pyc ADDED
Binary file (46.3 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/funnel/__pycache__/modeling_tf_funnel.cpython-310.pyc ADDED
Binary file (54.5 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/funnel/__pycache__/tokenization_funnel.cpython-310.pyc ADDED
Binary file (17.6 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/funnel/__pycache__/tokenization_funnel_fast.cpython-310.pyc ADDED
Binary file (7.39 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/funnel/tokenization_funnel.py ADDED
@@ -0,0 +1,534 @@
1
+ # coding=utf-8
2
+ # Copyright 2020 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ Tokenization class for Funnel Transformer."""
16
+
17
+ import collections
18
+ import os
19
+ import unicodedata
20
+ from typing import List, Optional, Tuple
21
+
22
+ from ...tokenization_utils import PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace
23
+ from ...utils import logging
24
+
25
+
26
+ logger = logging.get_logger(__name__)
27
+
28
+ VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}
29
+
30
+ _model_names = [
31
+ "small",
32
+ "small-base",
33
+ "medium",
34
+ "medium-base",
35
+ "intermediate",
36
+ "intermediate-base",
37
+ "large",
38
+ "large-base",
39
+ "xlarge",
40
+ "xlarge-base",
41
+ ]
42
+
43
+
44
+ # Copied from transformers.models.bert.tokenization_bert.load_vocab
45
+ def load_vocab(vocab_file):
46
+ """Loads a vocabulary file into a dictionary."""
47
+ vocab = collections.OrderedDict()
48
+ with open(vocab_file, "r", encoding="utf-8") as reader:
49
+ tokens = reader.readlines()
50
+ for index, token in enumerate(tokens):
51
+ token = token.rstrip("\n")
52
+ vocab[token] = index
53
+ return vocab
54
+
55
+
56
+ # Copied from transformers.models.bert.tokenization_bert.whitespace_tokenize
57
+ def whitespace_tokenize(text):
58
+ """Runs basic whitespace cleaning and splitting on a piece of text."""
59
+ text = text.strip()
60
+ if not text:
61
+ return []
62
+ tokens = text.split()
63
+ return tokens
64
+
65
+
66
+ class FunnelTokenizer(PreTrainedTokenizer):
67
+ r"""
68
+ Construct a Funnel Transformer tokenizer. Based on WordPiece.
69
+
70
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
71
+ this superclass for more information regarding those methods.
72
+
73
+ Args:
74
+ vocab_file (`str`):
75
+ File containing the vocabulary.
76
+ do_lower_case (`bool`, *optional*, defaults to `True`):
77
+ Whether or not to lowercase the input when tokenizing.
78
+ do_basic_tokenize (`bool`, *optional*, defaults to `True`):
79
+ Whether or not to do basic tokenization before WordPiece.
80
+ never_split (`Iterable`, *optional*):
81
+ Collection of tokens which will never be split during tokenization. Only has an effect when
82
+ `do_basic_tokenize=True`
83
+ unk_token (`str`, *optional*, defaults to `"<unk>"`):
84
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
85
+ token instead.
86
+ sep_token (`str`, *optional*, defaults to `"<sep>"`):
87
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
88
+ sequence classification or for a text and a question for question answering. It is also used as the last
89
+ token of a sequence built with special tokens.
90
+ pad_token (`str`, *optional*, defaults to `"<pad>"`):
91
+ The token used for padding, for example when batching sequences of different lengths.
92
+ cls_token (`str`, *optional*, defaults to `"<cls>"`):
93
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
94
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
95
+ mask_token (`str`, *optional*, defaults to `"<mask>"`):
96
+ The token used for masking values. This is the token used when training this model with masked language
97
+ modeling. This is the token which the model will try to predict.
98
+ bos_token (`str`, *optional*, defaults to `"<s>"`):
99
+ The beginning of sentence token.
100
+ eos_token (`str`, *optional*, defaults to `"</s>"`):
101
+ The end of sentence token.
102
+ tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
103
+ Whether or not to tokenize Chinese characters.
104
+
105
+ This should likely be deactivated for Japanese (see this
106
+ [issue](https://github.com/huggingface/transformers/issues/328)).
107
+ strip_accents (`bool`, *optional*):
108
+ Whether or not to strip all accents. If this option is not specified, then it will be determined by the
109
+ value for `lowercase` (as in the original BERT).
110
+ """
111
+
112
+ vocab_files_names = VOCAB_FILES_NAMES
113
+ cls_token_type_id: int = 2
114
+
115
+ def __init__(
116
+ self,
117
+ vocab_file,
118
+ do_lower_case=True,
119
+ do_basic_tokenize=True,
120
+ never_split=None,
121
+ unk_token="<unk>",
122
+ sep_token="<sep>",
123
+ pad_token="<pad>",
124
+ cls_token="<cls>",
125
+ mask_token="<mask>",
126
+ bos_token="<s>",
127
+ eos_token="</s>",
128
+ tokenize_chinese_chars=True,
129
+ strip_accents=None,
130
+ **kwargs,
131
+ ):
132
+ if not os.path.isfile(vocab_file):
133
+ raise ValueError(
134
+ f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
135
+ " model use `tokenizer = FunnelTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
136
+ )
137
+ self.vocab = load_vocab(vocab_file)
138
+ self.ids_to_tokens = collections.OrderedDict([(ids, tok) for tok, ids in self.vocab.items()])
139
+ self.do_basic_tokenize = do_basic_tokenize
140
+ if do_basic_tokenize:
141
+ self.basic_tokenizer = BasicTokenizer(
142
+ do_lower_case=do_lower_case,
143
+ never_split=never_split,
144
+ tokenize_chinese_chars=tokenize_chinese_chars,
145
+ strip_accents=strip_accents,
146
+ )
147
+ self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=str(unk_token))
148
+
149
+ super().__init__(
150
+ do_lower_case=do_lower_case,
151
+ do_basic_tokenize=do_basic_tokenize,
152
+ never_split=never_split,
153
+ unk_token=unk_token,
154
+ sep_token=sep_token,
155
+ pad_token=pad_token,
156
+ cls_token=cls_token,
157
+ mask_token=mask_token,
158
+ bos_token=bos_token,
159
+ eos_token=eos_token,
160
+ tokenize_chinese_chars=tokenize_chinese_chars,
161
+ strip_accents=strip_accents,
162
+ **kwargs,
163
+ )
164
+
165
+ @property
166
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.do_lower_case
167
+ def do_lower_case(self):
168
+ return self.basic_tokenizer.do_lower_case
169
+
170
+ @property
171
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.vocab_size
172
+ def vocab_size(self):
173
+ return len(self.vocab)
174
+
175
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.get_vocab
176
+ def get_vocab(self):
177
+ return dict(self.vocab, **self.added_tokens_encoder)
178
+
179
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer._tokenize
180
+ def _tokenize(self, text, split_special_tokens=False):
181
+ split_tokens = []
182
+ if self.do_basic_tokenize:
183
+ for token in self.basic_tokenizer.tokenize(
184
+ text, never_split=self.all_special_tokens if not split_special_tokens else None
185
+ ):
186
+ # If the token is part of the never_split set
187
+ if token in self.basic_tokenizer.never_split:
188
+ split_tokens.append(token)
189
+ else:
190
+ split_tokens += self.wordpiece_tokenizer.tokenize(token)
191
+ else:
192
+ split_tokens = self.wordpiece_tokenizer.tokenize(text)
193
+ return split_tokens
194
+
195
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer._convert_token_to_id
196
+ def _convert_token_to_id(self, token):
197
+ """Converts a token (str) in an id using the vocab."""
198
+ return self.vocab.get(token, self.vocab.get(self.unk_token))
199
+
200
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer._convert_id_to_token
201
+ def _convert_id_to_token(self, index):
202
+ """Converts an index (integer) in a token (str) using the vocab."""
203
+ return self.ids_to_tokens.get(index, self.unk_token)
204
+
205
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.convert_tokens_to_string
206
+ def convert_tokens_to_string(self, tokens):
207
+ """Converts a sequence of tokens (string) in a single string."""
208
+ out_string = " ".join(tokens).replace(" ##", "").strip()
209
+ return out_string
210
+
211
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.build_inputs_with_special_tokens
212
+ def build_inputs_with_special_tokens(
213
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
214
+ ) -> List[int]:
215
+ """
216
+ Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
217
+ adding special tokens. A BERT sequence has the following format:
218
+
219
+ - single sequence: `[CLS] X [SEP]`
220
+ - pair of sequences: `[CLS] A [SEP] B [SEP]`
221
+
222
+ Args:
223
+ token_ids_0 (`List[int]`):
224
+ List of IDs to which the special tokens will be added.
225
+ token_ids_1 (`List[int]`, *optional*):
226
+ Optional second list of IDs for sequence pairs.
227
+
228
+ Returns:
229
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
230
+ """
231
+ if token_ids_1 is None:
232
+ return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
233
+ cls = [self.cls_token_id]
234
+ sep = [self.sep_token_id]
235
+ return cls + token_ids_0 + sep + token_ids_1 + sep
236
+
237
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.get_special_tokens_mask
238
+ def get_special_tokens_mask(
239
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
240
+ ) -> List[int]:
241
+ """
242
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
243
+ special tokens using the tokenizer `prepare_for_model` method.
244
+
245
+ Args:
246
+ token_ids_0 (`List[int]`):
247
+ List of IDs.
248
+ token_ids_1 (`List[int]`, *optional*):
249
+ Optional second list of IDs for sequence pairs.
250
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
251
+ Whether or not the token list is already formatted with special tokens for the model.
252
+
253
+ Returns:
254
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
255
+ """
256
+
257
+ if already_has_special_tokens:
258
+ return super().get_special_tokens_mask(
259
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
260
+ )
261
+
262
+ if token_ids_1 is not None:
263
+ return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
264
+ return [1] + ([0] * len(token_ids_0)) + [1]
265
+
266
+ def create_token_type_ids_from_sequences(
267
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
268
+ ) -> List[int]:
269
+ """
270
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. A Funnel
271
+ Transformer sequence pair mask has the following format:
272
+
273
+ ```
274
+ 2 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
275
+ | first sequence | second sequence |
276
+ ```
277
+
278
+ If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).
279
+
280
+ Args:
281
+ token_ids_0 (`List[int]`):
282
+ List of IDs.
283
+ token_ids_1 (`List[int]`, *optional*):
284
+ Optional second list of IDs for sequence pairs.
285
+
286
+ Returns:
287
+ `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
288
+ """
289
+ sep = [self.sep_token_id]
290
+ cls = [self.cls_token_id]
291
+ if token_ids_1 is None:
292
+ return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0]
293
+ return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
294
+
295
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.save_vocabulary
296
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
297
+ index = 0
298
+ if os.path.isdir(save_directory):
299
+ vocab_file = os.path.join(
300
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
301
+ )
302
+ else:
303
+ vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
304
+ with open(vocab_file, "w", encoding="utf-8") as writer:
305
+ for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
306
+ if index != token_index:
307
+ logger.warning(
308
+ f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
309
+ " Please check that the vocabulary is not corrupted!"
310
+ )
311
+ index = token_index
312
+ writer.write(token + "\n")
313
+ index += 1
314
+ return (vocab_file,)
315
+
316
+
317
+ # Copied from transformers.models.bert.tokenization_bert.BasicTokenizer
318
+ class BasicTokenizer(object):
319
+ """
320
+ Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.).
321
+
322
+ Args:
323
+ do_lower_case (`bool`, *optional*, defaults to `True`):
324
+ Whether or not to lowercase the input when tokenizing.
325
+ never_split (`Iterable`, *optional*):
326
+ Collection of tokens which will never be split during tokenization. Only has an effect when
327
+ `do_basic_tokenize=True`
328
+ tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
329
+ Whether or not to tokenize Chinese characters.
330
+
331
+ This should likely be deactivated for Japanese (see this
332
+ [issue](https://github.com/huggingface/transformers/issues/328)).
333
+ strip_accents (`bool`, *optional*):
334
+ Whether or not to strip all accents. If this option is not specified, then it will be determined by the
335
+ value for `lowercase` (as in the original BERT).
336
+ do_split_on_punc (`bool`, *optional*, defaults to `True`):
337
+ In some instances we want to skip the basic punctuation splitting so that later tokenization can capture
338
+ the full context of the words, such as contractions.
339
+ """
340
+
341
+ def __init__(
342
+ self,
343
+ do_lower_case=True,
344
+ never_split=None,
345
+ tokenize_chinese_chars=True,
346
+ strip_accents=None,
347
+ do_split_on_punc=True,
348
+ ):
349
+ if never_split is None:
350
+ never_split = []
351
+ self.do_lower_case = do_lower_case
352
+ self.never_split = set(never_split)
353
+ self.tokenize_chinese_chars = tokenize_chinese_chars
354
+ self.strip_accents = strip_accents
355
+ self.do_split_on_punc = do_split_on_punc
356
+
357
+ def tokenize(self, text, never_split=None):
358
+ """
359
+ Basic Tokenization of a piece of text. For sub-word tokenization, see WordpieceTokenizer.
360
+
361
+ Args:
362
+ never_split (`List[str]`, *optional*)
363
+ Kept for backward compatibility purposes. Now implemented directly at the base class level (see
364
+ [`PreTrainedTokenizer.tokenize`]). List of tokens not to split.
365
+ """
366
+ # union() returns a new set by concatenating the two sets.
367
+ never_split = self.never_split.union(set(never_split)) if never_split else self.never_split
368
+ text = self._clean_text(text)
369
+
370
+ # This was added on November 1st, 2018 for the multilingual and Chinese
371
+ # models. This is also applied to the English models now, but it doesn't
372
+ # matter since the English models were not trained on any Chinese data
373
+ # and generally don't have any Chinese data in them (there are Chinese
374
+ # characters in the vocabulary because Wikipedia does have some Chinese
375
+ # words in the English Wikipedia.).
376
+ if self.tokenize_chinese_chars:
377
+ text = self._tokenize_chinese_chars(text)
378
+ # prevents treating the same character with different unicode codepoints as different characters
379
+ unicode_normalized_text = unicodedata.normalize("NFC", text)
380
+ orig_tokens = whitespace_tokenize(unicode_normalized_text)
381
+ split_tokens = []
382
+ for token in orig_tokens:
383
+ if token not in never_split:
384
+ if self.do_lower_case:
385
+ token = token.lower()
386
+ if self.strip_accents is not False:
387
+ token = self._run_strip_accents(token)
388
+ elif self.strip_accents:
389
+ token = self._run_strip_accents(token)
390
+ split_tokens.extend(self._run_split_on_punc(token, never_split))
391
+
392
+ output_tokens = whitespace_tokenize(" ".join(split_tokens))
393
+ return output_tokens
394
+
395
+ def _run_strip_accents(self, text):
396
+ """Strips accents from a piece of text."""
397
+ text = unicodedata.normalize("NFD", text)
398
+ output = []
399
+ for char in text:
400
+ cat = unicodedata.category(char)
401
+ if cat == "Mn":
402
+ continue
403
+ output.append(char)
404
+ return "".join(output)
405
+
406
+ def _run_split_on_punc(self, text, never_split=None):
407
+ """Splits punctuation on a piece of text."""
408
+ if not self.do_split_on_punc or (never_split is not None and text in never_split):
409
+ return [text]
410
+ chars = list(text)
411
+ i = 0
412
+ start_new_word = True
413
+ output = []
414
+ while i < len(chars):
415
+ char = chars[i]
416
+ if _is_punctuation(char):
417
+ output.append([char])
418
+ start_new_word = True
419
+ else:
420
+ if start_new_word:
421
+ output.append([])
422
+ start_new_word = False
423
+ output[-1].append(char)
424
+ i += 1
425
+
426
+ return ["".join(x) for x in output]
427
+
428
+ def _tokenize_chinese_chars(self, text):
429
+ """Adds whitespace around any CJK character."""
430
+ output = []
431
+ for char in text:
432
+ cp = ord(char)
433
+ if self._is_chinese_char(cp):
434
+ output.append(" ")
435
+ output.append(char)
436
+ output.append(" ")
437
+ else:
438
+ output.append(char)
439
+ return "".join(output)
440
+
441
+ def _is_chinese_char(self, cp):
442
+ """Checks whether CP is the codepoint of a CJK character."""
443
+ # This defines a "chinese character" as anything in the CJK Unicode block:
444
+ # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
445
+ #
446
+ # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
447
+ # despite its name. The modern Korean Hangul alphabet is a different block,
448
+ # as is Japanese Hiragana and Katakana. Those alphabets are used to write
449
+ # space-separated words, so they are not treated specially and handled
450
+ # like all of the other languages.
451
+ if (
452
+ (cp >= 0x4E00 and cp <= 0x9FFF)
453
+ or (cp >= 0x3400 and cp <= 0x4DBF) #
454
+ or (cp >= 0x20000 and cp <= 0x2A6DF) #
455
+ or (cp >= 0x2A700 and cp <= 0x2B73F) #
456
+ or (cp >= 0x2B740 and cp <= 0x2B81F) #
457
+ or (cp >= 0x2B820 and cp <= 0x2CEAF) #
458
+ or (cp >= 0xF900 and cp <= 0xFAFF)
459
+ or (cp >= 0x2F800 and cp <= 0x2FA1F) #
460
+ ): #
461
+ return True
462
+
463
+ return False
464
+
465
+ def _clean_text(self, text):
466
+ """Performs invalid character removal and whitespace cleanup on text."""
467
+ output = []
468
+ for char in text:
469
+ cp = ord(char)
470
+ if cp == 0 or cp == 0xFFFD or _is_control(char):
471
+ continue
472
+ if _is_whitespace(char):
473
+ output.append(" ")
474
+ else:
475
+ output.append(char)
476
+ return "".join(output)
477
+
478
+
479
+ # Copied from transformers.models.bert.tokenization_bert.WordpieceTokenizer
480
+ class WordpieceTokenizer(object):
481
+ """Runs WordPiece tokenization."""
482
+
483
+ def __init__(self, vocab, unk_token, max_input_chars_per_word=100):
484
+ self.vocab = vocab
485
+ self.unk_token = unk_token
486
+ self.max_input_chars_per_word = max_input_chars_per_word
487
+
488
+ def tokenize(self, text):
489
+ """
490
+ Tokenizes a piece of text into its word pieces. This uses a greedy longest-match-first algorithm to perform
491
+ tokenization using the given vocabulary.
492
+
493
+ For example, `input = "unaffable"` wil return as output `["un", "##aff", "##able"]`.
494
+
495
+ Args:
496
+ text: A single token or whitespace separated tokens. This should have
497
+ already been passed through *BasicTokenizer*.
498
+
499
+ Returns:
500
+ A list of wordpiece tokens.
501
+ """
502
+
503
+ output_tokens = []
504
+ for token in whitespace_tokenize(text):
505
+ chars = list(token)
506
+ if len(chars) > self.max_input_chars_per_word:
507
+ output_tokens.append(self.unk_token)
508
+ continue
509
+
510
+ is_bad = False
511
+ start = 0
512
+ sub_tokens = []
513
+ while start < len(chars):
514
+ end = len(chars)
515
+ cur_substr = None
516
+ while start < end:
517
+ substr = "".join(chars[start:end])
518
+ if start > 0:
519
+ substr = "##" + substr
520
+ if substr in self.vocab:
521
+ cur_substr = substr
522
+ break
523
+ end -= 1
524
+ if cur_substr is None:
525
+ is_bad = True
526
+ break
527
+ sub_tokens.append(cur_substr)
528
+ start = end
529
+
530
+ if is_bad:
531
+ output_tokens.append(self.unk_token)
532
+ else:
533
+ output_tokens.extend(sub_tokens)
534
+ return output_tokens
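
A minimal sketch of the greedy longest-match-first WordPiece pass implemented by `WordpieceTokenizer.tokenize` above, run on a single token against a toy vocabulary. The vocabulary contents and the `<unk>` string are assumptions for illustration; the class in the diff additionally whitespace-splits its input and caps token length.

```python
# Greedy longest-match-first WordPiece on a single token, mirroring WordpieceTokenizer above.
# `toy_vocab` is an assumed toy vocabulary for demonstration only.
toy_vocab = {"un", "##aff", "##able"}

def wordpiece(token, vocab, unk_token="<unk>"):
    chars = list(token)
    start, pieces = 0, []
    while start < len(chars):
        end, cur = len(chars), None
        while start < end:
            sub = "".join(chars[start:end])
            if start > 0:
                sub = "##" + sub  # continuation pieces carry the "##" prefix
            if sub in vocab:
                cur = sub
                break
            end -= 1
        if cur is None:
            return [unk_token]  # no piece matched: the whole token maps to <unk>
        pieces.append(cur)
        start = end
    return pieces

print(wordpiece("unaffable", toy_vocab))  # ['un', '##aff', '##able']
print(wordpiece("xyz", toy_vocab))        # ['<unk>']
```
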
llmeval-env/lib/python3.10/site-packages/transformers/models/gemma/__init__.py ADDED
@@ -0,0 +1,121 @@
1
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import (
17
+ OptionalDependencyNotAvailable,
18
+ _LazyModule,
19
+ is_flax_available,
20
+ is_sentencepiece_available,
21
+ is_tokenizers_available,
22
+ is_torch_available,
23
+ )
24
+
25
+
26
+ _import_structure = {
27
+ "configuration_gemma": ["GEMMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "GemmaConfig"],
28
+ }
29
+
30
+ try:
31
+ if not is_sentencepiece_available():
32
+ raise OptionalDependencyNotAvailable()
33
+ except OptionalDependencyNotAvailable:
34
+ pass
35
+ else:
36
+ _import_structure["tokenization_gemma"] = ["GemmaTokenizer"]
37
+
38
+ try:
39
+ if not is_tokenizers_available():
40
+ raise OptionalDependencyNotAvailable()
41
+ except OptionalDependencyNotAvailable:
42
+ pass
43
+ else:
44
+ _import_structure["tokenization_gemma_fast"] = ["GemmaTokenizerFast"]
45
+
46
+
47
+ try:
48
+ if not is_torch_available():
49
+ raise OptionalDependencyNotAvailable()
50
+ except OptionalDependencyNotAvailable:
51
+ pass
52
+ else:
53
+ _import_structure["modeling_gemma"] = [
54
+ "GemmaForCausalLM",
55
+ "GemmaModel",
56
+ "GemmaPreTrainedModel",
57
+ "GemmaForSequenceClassification",
58
+ ]
59
+
60
+ try:
61
+ if not is_flax_available():
62
+ raise OptionalDependencyNotAvailable()
63
+ except OptionalDependencyNotAvailable:
64
+ pass
65
+ else:
66
+ _import_structure["modeling_flax_gemma"] = [
67
+ "FlaxGemmaForCausalLM",
68
+ "FlaxGemmaModel",
69
+ "FlaxGemmaPreTrainedModel",
70
+ ]
71
+
72
+
73
+ if TYPE_CHECKING:
74
+ from .configuration_gemma import GEMMA_PRETRAINED_CONFIG_ARCHIVE_MAP, GemmaConfig
75
+
76
+ try:
77
+ if not is_sentencepiece_available():
78
+ raise OptionalDependencyNotAvailable()
79
+ except OptionalDependencyNotAvailable:
80
+ pass
81
+ else:
82
+ from .tokenization_gemma import GemmaTokenizer
83
+
84
+ try:
85
+ if not is_tokenizers_available():
86
+ raise OptionalDependencyNotAvailable()
87
+ except OptionalDependencyNotAvailable:
88
+ pass
89
+ else:
90
+ from .tokenization_gemma_fast import GemmaTokenizerFast
91
+
92
+ try:
93
+ if not is_torch_available():
94
+ raise OptionalDependencyNotAvailable()
95
+ except OptionalDependencyNotAvailable:
96
+ pass
97
+ else:
98
+ from .modeling_gemma import (
99
+ GemmaForCausalLM,
100
+ GemmaForSequenceClassification,
101
+ GemmaModel,
102
+ GemmaPreTrainedModel,
103
+ )
104
+
105
+ try:
106
+ if not is_flax_available():
107
+ raise OptionalDependencyNotAvailable()
108
+ except OptionalDependencyNotAvailable:
109
+ pass
110
+ else:
111
+ from .modeling_flax_gemma import (
112
+ FlaxGemmaForCausalLM,
113
+ FlaxGemmaModel,
114
+ FlaxGemmaPreTrainedModel,
115
+ )
116
+
117
+
118
+ else:
119
+ import sys
120
+
121
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
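
The `__init__.py` above registers a `_LazyModule` so that heavy submodules are imported only when one of their exported names is first accessed, with the optional-dependency guards deciding which names are registered at all. Below is a simplified, self-contained sketch of that pattern; it is not the actual `transformers._LazyModule` (the real one resolves package-relative submodules and carries `module_spec`), and standard-library modules stand in for `configuration_gemma` and friends.

```python
# Simplified lazy-module sketch (assumption: not the real transformers._LazyModule).
# Names resolve to their defining module only on first attribute access.
import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each exported name to the module that defines it,
        # e.g. "GemmaConfig" -> "configuration_gemma" in the file above.
        self._name_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, name):
        module_name = self._name_to_module.get(name)
        if module_name is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {name!r}")
        submodule = importlib.import_module(module_name)  # imported lazily, on demand
        return getattr(submodule, name)

# Demo with stdlib modules standing in for the Gemma submodules.
pkg = LazyModule("demo", {"json": ["dumps"], "math": ["sqrt"]})
print(pkg.dumps({"ok": True}))  # json is imported only here, on first use
print(pkg.sqrt(9.0))
```
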
llmeval-env/lib/python3.10/site-packages/transformers/models/gemma/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.63 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/gemma/__pycache__/configuration_gemma.cpython-310.pyc ADDED
Binary file (6.3 kB).