diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/__pycache__/cache_utils.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/__pycache__/cache_utils.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e53cb679b3fe0a6d6d336f989162d64598f655b7
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/__pycache__/cache_utils.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/__pycache__/feature_extraction_utils.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/__pycache__/feature_extraction_utils.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e128694b274c6b8ac07428a3141013726383926d
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/__pycache__/feature_extraction_utils.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/__pycache__/image_processing_utils.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/__pycache__/image_processing_utils.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..38e212ae43f189a3ba7ec953f3519714f74a5a40
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/__pycache__/image_processing_utils.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/__pycache__/image_utils.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/__pycache__/image_utils.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7061212589e3defd7990ef8d48b3bcb5ba47bf65
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/__pycache__/image_utils.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/__pycache__/optimization.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/__pycache__/optimization.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..84ea97fd70cebe9437800ecfe29e6f75044c3b45
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/__pycache__/optimization.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/__pycache__/safetensors_conversion.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/__pycache__/safetensors_conversion.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ae867a76c8fa091983e1226632c4296256c78c7c
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/__pycache__/safetensors_conversion.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/__pycache__/tokenization_utils_fast.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/__pycache__/tokenization_utils_fast.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a46666404675d2e70b5ee3803cb0e56a38157964
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/__pycache__/tokenization_utils_fast.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/__pycache__/trainer_pt_utils.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/__pycache__/trainer_pt_utils.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7c38c158b7fe7d6c542cc3097a70078243733c75
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/__pycache__/trainer_pt_utils.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/data/__init__.py b/llmeval-env/lib/python3.10/site-packages/transformers/data/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..1a8ef35ff439e48caf92dba731f7c551f6dcf285
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/data/__init__.py
@@ -0,0 +1,44 @@
+# Copyright 2020 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from .data_collator import (
+ DataCollatorForLanguageModeling,
+ DataCollatorForPermutationLanguageModeling,
+ DataCollatorForSeq2Seq,
+ DataCollatorForSOP,
+ DataCollatorForTokenClassification,
+ DataCollatorForWholeWordMask,
+ DataCollatorWithPadding,
+ DefaultDataCollator,
+ default_data_collator,
+)
+from .metrics import glue_compute_metrics, xnli_compute_metrics
+from .processors import (
+ DataProcessor,
+ InputExample,
+ InputFeatures,
+ SingleSentenceClassificationProcessor,
+ SquadExample,
+ SquadFeatures,
+ SquadV1Processor,
+ SquadV2Processor,
+ glue_convert_examples_to_features,
+ glue_output_modes,
+ glue_processors,
+ glue_tasks_num_labels,
+ squad_convert_examples_to_features,
+ xnli_output_modes,
+ xnli_processors,
+ xnli_tasks_num_labels,
+)
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/data/data_collator.py b/llmeval-env/lib/python3.10/site-packages/transformers/data/data_collator.py
new file mode 100644
index 0000000000000000000000000000000000000000..b81e1f17573c97d33547256271b2ae54d3856ab9
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/data/data_collator.py
@@ -0,0 +1,1568 @@
+# Copyright 2020 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import random
+import warnings
+from collections.abc import Mapping
+from dataclasses import dataclass
+from random import randint
+from typing import Any, Callable, Dict, List, NewType, Optional, Tuple, Union
+
+import numpy as np
+
+from ..models.bert import BertTokenizer, BertTokenizerFast
+from ..tokenization_utils_base import PreTrainedTokenizerBase
+from ..utils import PaddingStrategy
+
+
+InputDataClass = NewType("InputDataClass", Any)
+
+"""
+A DataCollator is a function that takes a list of samples from a Dataset and collates them into a batch, as a dictionary
+of PyTorch/TensorFlow tensors or NumPy arrays.
+"""
+DataCollator = NewType("DataCollator", Callable[[List[InputDataClass]], Dict[str, Any]])
+
+
+class DataCollatorMixin:
+ def __call__(self, features, return_tensors=None):
+ if return_tensors is None:
+ return_tensors = self.return_tensors
+ if return_tensors == "tf":
+ return self.tf_call(features)
+ elif return_tensors == "pt":
+ return self.torch_call(features)
+ elif return_tensors == "np":
+ return self.numpy_call(features)
+ else:
+ raise ValueError(f"Framework '{return_tensors}' not recognized!")
+
+
+def pad_without_fast_tokenizer_warning(tokenizer, *pad_args, **pad_kwargs):
+ """
+ Pads without triggering the warning about how using the pad function is sub-optimal when using a fast tokenizer.
+ """
+
+ # To avoid errors when using Feature extractors
+ if not hasattr(tokenizer, "deprecation_warnings"):
+ return tokenizer.pad(*pad_args, **pad_kwargs)
+
+ # Save the state of the warning, then disable it
+ warning_state = tokenizer.deprecation_warnings.get("Asking-to-pad-a-fast-tokenizer", False)
+ tokenizer.deprecation_warnings["Asking-to-pad-a-fast-tokenizer"] = True
+
+ try:
+ padded = tokenizer.pad(*pad_args, **pad_kwargs)
+ finally:
+ # Restore the state of the warning.
+ tokenizer.deprecation_warnings["Asking-to-pad-a-fast-tokenizer"] = warning_state
+
+ return padded
+
+
+def default_data_collator(features: List[InputDataClass], return_tensors="pt") -> Dict[str, Any]:
+ """
+    Very simple data collator that collates batches of dict-like objects and performs special handling for
+ potential keys named:
+
+ - `label`: handles a single value (int or float) per object
+ - `label_ids`: handles a list of values per object
+
+ Does not do any additional preprocessing: property names of the input object will be used as corresponding inputs
+    to the model. See the GLUE and NER examples for how it's useful.
+ """
+
+ # In this function we'll make the assumption that all `features` in the batch
+ # have the same attributes.
+ # So we will look at the first element as a proxy for what attributes exist
+ # on the whole batch.
+
+ if return_tensors == "pt":
+ return torch_default_data_collator(features)
+ elif return_tensors == "tf":
+ return tf_default_data_collator(features)
+ elif return_tensors == "np":
+ return numpy_default_data_collator(features)
+
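+# Minimal usage sketch (kept as comments so the module's import behaviour is unchanged; the feature
+# dicts below are illustrative toy values):
+#
+# >>> features = [{"input_ids": [0, 1, 2], "label": 0}, {"input_ids": [3, 4, 5], "label": 1}]
+# >>> batch = default_data_collator(features, return_tensors="pt")
+# >>> sorted(batch.keys())
+# ['input_ids', 'labels']
+# >>> batch["labels"].dtype  # integer labels are collated as torch.long
+# torch.int64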
+
+@dataclass
+class DefaultDataCollator(DataCollatorMixin):
+ """
+    Very simple data collator that collates batches of dict-like objects and performs special handling for
+ potential keys named:
+
+ - `label`: handles a single value (int or float) per object
+ - `label_ids`: handles a list of values per object
+
+ Does not do any additional preprocessing: property names of the input object will be used as corresponding inputs
+    to the model. See the GLUE and NER examples for how it's useful.
+
+ This is an object (like other data collators) rather than a pure function like default_data_collator. This can be
+ helpful if you need to set a return_tensors value at initialization.
+
+ Args:
+ return_tensors (`str`, *optional*, defaults to `"pt"`):
+ The type of Tensor to return. Allowable values are "np", "pt" and "tf".
+ """
+
+ return_tensors: str = "pt"
+
+ def __call__(self, features: List[Dict[str, Any]], return_tensors=None) -> Dict[str, Any]:
+ if return_tensors is None:
+ return_tensors = self.return_tensors
+ return default_data_collator(features, return_tensors)
+
+
+def torch_default_data_collator(features: List[InputDataClass]) -> Dict[str, Any]:
+ import torch
+
+ if not isinstance(features[0], Mapping):
+ features = [vars(f) for f in features]
+ first = features[0]
+ batch = {}
+
+ # Special handling for labels.
+ # Ensure that tensor is created with the correct type
+ # (it should be automatically the case, but let's make sure of it.)
+ if "label" in first and first["label"] is not None:
+ label = first["label"].item() if isinstance(first["label"], torch.Tensor) else first["label"]
+ dtype = torch.long if isinstance(label, int) else torch.float
+ batch["labels"] = torch.tensor([f["label"] for f in features], dtype=dtype)
+ elif "label_ids" in first and first["label_ids"] is not None:
+ if isinstance(first["label_ids"], torch.Tensor):
+ batch["labels"] = torch.stack([f["label_ids"] for f in features])
+ else:
+ dtype = torch.long if isinstance(first["label_ids"][0], int) else torch.float
+ batch["labels"] = torch.tensor([f["label_ids"] for f in features], dtype=dtype)
+
+ # Handling of all other possible keys.
+ # Again, we will use the first element to figure out which key/values are not None for this model.
+ for k, v in first.items():
+ if k not in ("label", "label_ids") and v is not None and not isinstance(v, str):
+ if isinstance(v, torch.Tensor):
+ batch[k] = torch.stack([f[k] for f in features])
+ elif isinstance(v, np.ndarray):
+ batch[k] = torch.tensor(np.stack([f[k] for f in features]))
+ else:
+ batch[k] = torch.tensor([f[k] for f in features])
+
+ return batch
+
+
+def tf_default_data_collator(features: List[InputDataClass]) -> Dict[str, Any]:
+ import tensorflow as tf
+
+ if not isinstance(features[0], Mapping):
+ features = [vars(f) for f in features]
+ first = features[0]
+ batch = {}
+
+ # Special handling for labels.
+ # Ensure that tensor is created with the correct type
+ # (it should be automatically the case, but let's make sure of it.)
+ if "label" in first and first["label"] is not None:
+ label_col_name = "label"
+ elif "label_ids" in first and first["label_ids"] is not None:
+ label_col_name = "label_ids"
+ elif "labels" in first and first["labels"] is not None:
+ label_col_name = "labels"
+ else:
+ label_col_name = None
+ if label_col_name is not None:
+ if isinstance(first[label_col_name], tf.Tensor):
+ dtype = tf.int64 if first[label_col_name].dtype.is_integer else tf.float32
+ elif isinstance(first[label_col_name], np.ndarray) or isinstance(first[label_col_name], np.generic):
+ dtype = tf.int64 if np.issubdtype(first[label_col_name].dtype, np.integer) else tf.float32
+ elif isinstance(first[label_col_name], (tuple, list)):
+ dtype = tf.int64 if isinstance(first[label_col_name][0], int) else tf.float32
+ else:
+ dtype = tf.int64 if isinstance(first[label_col_name], int) else tf.float32
+ batch["labels"] = tf.convert_to_tensor([f[label_col_name] for f in features], dtype=dtype)
+ # Handling of all other possible keys.
+ # Again, we will use the first element to figure out which key/values are not None for this model.
+ for k, v in first.items():
+ if k not in ("label", "label_ids", "labels") and v is not None and not isinstance(v, str):
+ if isinstance(v, (tf.Tensor, np.ndarray)):
+ batch[k] = tf.stack([f[k] for f in features])
+ else:
+ batch[k] = tf.convert_to_tensor([f[k] for f in features])
+
+ return batch
+
+
+def numpy_default_data_collator(features: List[InputDataClass]) -> Dict[str, Any]:
+ if not isinstance(features[0], Mapping):
+ features = [vars(f) for f in features]
+ first = features[0]
+ batch = {}
+
+ # Special handling for labels.
+ # Ensure that tensor is created with the correct type
+ # (it should be automatically the case, but let's make sure of it.)
+ if "label" in first and first["label"] is not None:
+ label = first["label"].item() if isinstance(first["label"], np.ndarray) else first["label"]
+ dtype = np.int64 if isinstance(label, int) else np.float32
+ batch["labels"] = np.array([f["label"] for f in features], dtype=dtype)
+ elif "label_ids" in first and first["label_ids"] is not None:
+ if isinstance(first["label_ids"], np.ndarray):
+ batch["labels"] = np.stack([f["label_ids"] for f in features])
+ else:
+ dtype = np.int64 if isinstance(first["label_ids"][0], int) else np.float32
+ batch["labels"] = np.array([f["label_ids"] for f in features], dtype=dtype)
+
+ # Handling of all other possible keys.
+ # Again, we will use the first element to figure out which key/values are not None for this model.
+ for k, v in first.items():
+ if k not in ("label", "label_ids") and v is not None and not isinstance(v, str):
+ if isinstance(v, np.ndarray):
+ batch[k] = np.stack([f[k] for f in features])
+ else:
+ batch[k] = np.array([f[k] for f in features])
+
+ return batch
+
+
+@dataclass
+class DataCollatorWithPadding:
+ """
+ Data collator that will dynamically pad the inputs received.
+
+ Args:
+ tokenizer ([`PreTrainedTokenizer`] or [`PreTrainedTokenizerFast`]):
+ The tokenizer used for encoding the data.
+ padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`):
+ Select a strategy to pad the returned sequences (according to the model's padding side and padding index)
+ among:
+
+ - `True` or `'longest'` (default): Pad to the longest sequence in the batch (or no padding if only a single
+ sequence is provided).
+ - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
+ acceptable input length for the model if that argument is not provided.
+ - `False` or `'do_not_pad'`: No padding (i.e., can output a batch with sequences of different lengths).
+ max_length (`int`, *optional*):
+ Maximum length of the returned list and optionally padding length (see above).
+ pad_to_multiple_of (`int`, *optional*):
+            If set, will pad the sequence to a multiple of the provided value.
+
+ This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=
+ 7.5 (Volta).
+ return_tensors (`str`, *optional*, defaults to `"pt"`):
+ The type of Tensor to return. Allowable values are "np", "pt" and "tf".
+ """
+
+ tokenizer: PreTrainedTokenizerBase
+ padding: Union[bool, str, PaddingStrategy] = True
+ max_length: Optional[int] = None
+ pad_to_multiple_of: Optional[int] = None
+ return_tensors: str = "pt"
+
+ def __call__(self, features: List[Dict[str, Any]]) -> Dict[str, Any]:
+ batch = pad_without_fast_tokenizer_warning(
+ self.tokenizer,
+ features,
+ padding=self.padding,
+ max_length=self.max_length,
+ pad_to_multiple_of=self.pad_to_multiple_of,
+ return_tensors=self.return_tensors,
+ )
+ if "label" in batch:
+ batch["labels"] = batch["label"]
+ del batch["label"]
+ if "label_ids" in batch:
+ batch["labels"] = batch["label_ids"]
+ del batch["label_ids"]
+ return batch
+
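+# Minimal usage sketch (comments only; assumes a BERT-style checkpoint, used here purely for
+# illustration). The collator defers to `tokenizer.pad`, so both encodings end up padded to the
+# longest sequence in the batch, rounded up to a multiple of 8 here.
+#
+# >>> from transformers import AutoTokenizer
+# >>> tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
+# >>> collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
+# >>> features = [tokenizer("short text"), tokenizer("a noticeably longer piece of text")]
+# >>> batch = collator(features)
+# >>> batch["input_ids"].shape[0]
+# 2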
+
+@dataclass
+class DataCollatorForTokenClassification(DataCollatorMixin):
+ """
+ Data collator that will dynamically pad the inputs received, as well as the labels.
+
+ Args:
+ tokenizer ([`PreTrainedTokenizer`] or [`PreTrainedTokenizerFast`]):
+ The tokenizer used for encoding the data.
+ padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`):
+ Select a strategy to pad the returned sequences (according to the model's padding side and padding index)
+ among:
+
+ - `True` or `'longest'` (default): Pad to the longest sequence in the batch (or no padding if only a single
+ sequence is provided).
+ - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
+ acceptable input length for the model if that argument is not provided.
+ - `False` or `'do_not_pad'`: No padding (i.e., can output a batch with sequences of different lengths).
+ max_length (`int`, *optional*):
+ Maximum length of the returned list and optionally padding length (see above).
+ pad_to_multiple_of (`int`, *optional*):
+            If set, will pad the sequence to a multiple of the provided value.
+
+ This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=
+ 7.5 (Volta).
+ label_pad_token_id (`int`, *optional*, defaults to -100):
+            The id to use when padding the labels (-100 will be automatically ignored by PyTorch loss functions).
+ return_tensors (`str`, *optional*, defaults to `"pt"`):
+ The type of Tensor to return. Allowable values are "np", "pt" and "tf".
+ """
+
+ tokenizer: PreTrainedTokenizerBase
+ padding: Union[bool, str, PaddingStrategy] = True
+ max_length: Optional[int] = None
+ pad_to_multiple_of: Optional[int] = None
+ label_pad_token_id: int = -100
+ return_tensors: str = "pt"
+
+ def torch_call(self, features):
+ import torch
+
+ label_name = "label" if "label" in features[0].keys() else "labels"
+ labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
+
+ no_labels_features = [{k: v for k, v in feature.items() if k != label_name} for feature in features]
+
+ batch = pad_without_fast_tokenizer_warning(
+ self.tokenizer,
+ no_labels_features,
+ padding=self.padding,
+ max_length=self.max_length,
+ pad_to_multiple_of=self.pad_to_multiple_of,
+ return_tensors="pt",
+ )
+
+ if labels is None:
+ return batch
+
+ sequence_length = batch["input_ids"].shape[1]
+ padding_side = self.tokenizer.padding_side
+
+ def to_list(tensor_or_iterable):
+ if isinstance(tensor_or_iterable, torch.Tensor):
+ return tensor_or_iterable.tolist()
+ return list(tensor_or_iterable)
+
+ if padding_side == "right":
+ batch[label_name] = [
+ to_list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
+ ]
+ else:
+ batch[label_name] = [
+ [self.label_pad_token_id] * (sequence_length - len(label)) + to_list(label) for label in labels
+ ]
+
+ batch[label_name] = torch.tensor(batch[label_name], dtype=torch.int64)
+ return batch
+
+ def tf_call(self, features):
+ import tensorflow as tf
+
+ label_name = "label" if "label" in features[0].keys() else "labels"
+ labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
+ batch = pad_without_fast_tokenizer_warning(
+ self.tokenizer,
+ features,
+ padding=self.padding,
+ max_length=self.max_length,
+ pad_to_multiple_of=self.pad_to_multiple_of,
+ # Conversion to tensors will fail if we have labels as they are not of the same length yet.
+ return_tensors="tf" if labels is None else None,
+ )
+
+ if labels is None:
+ return batch
+
+ sequence_length = tf.convert_to_tensor(batch["input_ids"]).shape[1]
+ padding_side = self.tokenizer.padding_side
+ if padding_side == "right":
+ batch["labels"] = [
+ list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
+ ]
+ else:
+ batch["labels"] = [
+ [self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels
+ ]
+
+ batch = {k: tf.convert_to_tensor(v, dtype=tf.int64) for k, v in batch.items()}
+ return batch
+
+ def numpy_call(self, features):
+ label_name = "label" if "label" in features[0].keys() else "labels"
+ labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
+ batch = pad_without_fast_tokenizer_warning(
+ self.tokenizer,
+ features,
+ padding=self.padding,
+ max_length=self.max_length,
+ pad_to_multiple_of=self.pad_to_multiple_of,
+ # Conversion to tensors will fail if we have labels as they are not of the same length yet.
+ return_tensors="np" if labels is None else None,
+ )
+
+ if labels is None:
+ return batch
+
+ sequence_length = np.array(batch["input_ids"]).shape[1]
+ padding_side = self.tokenizer.padding_side
+ if padding_side == "right":
+ batch["labels"] = [
+ list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
+ ]
+ else:
+ batch["labels"] = [
+ [self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels
+ ]
+
+ batch = {k: np.array(v, dtype=np.int64) for k, v in batch.items()}
+ return batch
+
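+# Minimal usage sketch (comments only; the token ids and labels below are toy values). Label
+# sequences shorter than the padded input are extended with `label_pad_token_id` (-100) so PyTorch
+# loss functions ignore the padded positions.
+#
+# >>> from transformers import AutoTokenizer
+# >>> tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
+# >>> collator = DataCollatorForTokenClassification(tokenizer)
+# >>> features = [
+# ...     {"input_ids": [101, 7592, 102], "labels": [0, 1, 0]},
+# ...     {"input_ids": [101, 7592, 2088, 999, 102], "labels": [0, 1, 2, 1, 0]},
+# ... ]
+# >>> collator(features)["labels"][0].tolist()
+# [0, 1, 0, -100, -100]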
+
+def _torch_collate_batch(examples, tokenizer, pad_to_multiple_of: Optional[int] = None):
+ """Collate `examples` into a batch, using the information in `tokenizer` for padding if necessary."""
+ import torch
+
+ # Tensorize if necessary.
+ if isinstance(examples[0], (list, tuple, np.ndarray)):
+ examples = [torch.tensor(e, dtype=torch.long) for e in examples]
+
+ length_of_first = examples[0].size(0)
+
+ # Check if padding is necessary.
+
+ are_tensors_same_length = all(x.size(0) == length_of_first for x in examples)
+ if are_tensors_same_length and (pad_to_multiple_of is None or length_of_first % pad_to_multiple_of == 0):
+ return torch.stack(examples, dim=0)
+
+ # If yes, check if we have a `pad_token`.
+ if tokenizer._pad_token is None:
+ raise ValueError(
+ "You are attempting to pad samples but the tokenizer you are using"
+ f" ({tokenizer.__class__.__name__}) does not have a pad token."
+ )
+
+ # Creating the full tensor and filling it with our data.
+ max_length = max(x.size(0) for x in examples)
+ if pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
+ max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
+ result = examples[0].new_full([len(examples), max_length], tokenizer.pad_token_id)
+ for i, example in enumerate(examples):
+ if tokenizer.padding_side == "right":
+ result[i, : example.shape[0]] = example
+ else:
+ result[i, -example.shape[0] :] = example
+ return result
+
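+# Worked example of the `pad_to_multiple_of` rounding above (values are illustrative): with
+# max_length = 13 and pad_to_multiple_of = 8, ((13 // 8) + 1) * 8 = 16, so every example is padded
+# to length 16 with `tokenizer.pad_token_id` on the tokenizer's padding side.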
+
+def _tf_collate_batch(examples, tokenizer, pad_to_multiple_of: Optional[int] = None):
+    """Collate `examples` into a batch, using the information in `tokenizer` for padding if necessary."""
+    import tensorflow as tf
+
+ # Tensorize if necessary.
+ if isinstance(examples[0], (list, tuple)):
+ examples = [tf.convert_to_tensor(e, dtype=tf.int64) for e in examples]
+
+ # Check if padding is necessary.
+ length_of_first = len(examples[0])
+ are_tensors_same_length = all(len(x) == length_of_first for x in examples)
+ if are_tensors_same_length and (pad_to_multiple_of is None or length_of_first % pad_to_multiple_of == 0):
+ return tf.stack(examples, axis=0)
+
+ # If yes, check if we have a `pad_token`.
+ if tokenizer._pad_token is None:
+ raise ValueError(
+ "You are attempting to pad samples but the tokenizer you are using"
+ f" ({tokenizer.__class__.__name__}) does not have a pad token."
+ )
+
+ # Creating the full tensor and filling it with our data.
+ max_length = max(len(x) for x in examples)
+ if pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
+ max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
+ # result = examples[0].new_full([len(examples), max_length], tokenizer.pad_token_id)
+ result = []
+ rank = tf.rank(examples[0])
+ paddings = np.zeros((rank, 2), dtype=np.int32)
+ for example in examples:
+ if tokenizer.padding_side == "right":
+ paddings[0, 1] = max_length - len(example)
+ else:
+ paddings[0, 0] = max_length - len(example)
+ result.append(tf.pad(example, paddings, constant_values=tokenizer.pad_token_id))
+ return tf.stack(result, axis=0)
+
+
+def _numpy_collate_batch(examples, tokenizer, pad_to_multiple_of: Optional[int] = None):
+ """Collate `examples` into a batch, using the information in `tokenizer` for padding if necessary."""
+ # Tensorize if necessary.
+ if isinstance(examples[0], (list, tuple)):
+ examples = [np.array(e, dtype=np.int64) for e in examples]
+
+ # Check if padding is necessary.
+ length_of_first = len(examples[0])
+ are_tensors_same_length = all(len(x) == length_of_first for x in examples)
+ if are_tensors_same_length and (pad_to_multiple_of is None or length_of_first % pad_to_multiple_of == 0):
+ return np.stack(examples, axis=0)
+
+ # If yes, check if we have a `pad_token`.
+ if tokenizer._pad_token is None:
+ raise ValueError(
+ "You are attempting to pad samples but the tokenizer you are using"
+ f" ({tokenizer.__class__.__name__}) does not have a pad token."
+ )
+
+ # Creating the full tensor and filling it with our data.
+ max_length = max(len(x) for x in examples)
+ if pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
+ max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
+ result = np.full(shape=(len(examples), max_length), fill_value=tokenizer.pad_token_id, dtype=examples[0].dtype)
+ for i, example in enumerate(examples):
+ if tokenizer.padding_side == "right":
+ result[i, : example.shape[0]] = example
+ else:
+ result[i, -example.shape[0] :] = example
+ return result
+
+
+def tolist(x):
+ if isinstance(x, list):
+ return x
+ elif hasattr(x, "numpy"): # Checks for TF tensors without needing the import
+ x = x.numpy()
+ return x.tolist()
+
+
+@dataclass
+class DataCollatorForSeq2Seq:
+ """
+ Data collator that will dynamically pad the inputs received, as well as the labels.
+
+ Args:
+ tokenizer ([`PreTrainedTokenizer`] or [`PreTrainedTokenizerFast`]):
+ The tokenizer used for encoding the data.
+ model ([`PreTrainedModel`], *optional*):
+            The model that is being trained. If set and it has the *prepare_decoder_input_ids_from_labels* method, it
+            is used to prepare the *decoder_input_ids*.
+
+ This is useful when using *label_smoothing* to avoid calculating loss twice.
+ padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`):
+ Select a strategy to pad the returned sequences (according to the model's padding side and padding index)
+ among:
+
+ - `True` or `'longest'` (default): Pad to the longest sequence in the batch (or no padding if only a single
+ sequence is provided).
+ - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
+ acceptable input length for the model if that argument is not provided.
+ - `False` or `'do_not_pad'`: No padding (i.e., can output a batch with sequences of different lengths).
+ max_length (`int`, *optional*):
+ Maximum length of the returned list and optionally padding length (see above).
+ pad_to_multiple_of (`int`, *optional*):
+            If set, will pad the sequence to a multiple of the provided value.
+
+ This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=
+ 7.5 (Volta).
+ label_pad_token_id (`int`, *optional*, defaults to -100):
+ The id to use when padding the labels (-100 will be automatically ignored by PyTorch loss functions).
+ return_tensors (`str`, *optional*, defaults to `"pt"`):
+ The type of Tensor to return. Allowable values are "np", "pt" and "tf".
+ """
+
+ tokenizer: PreTrainedTokenizerBase
+ model: Optional[Any] = None
+ padding: Union[bool, str, PaddingStrategy] = True
+ max_length: Optional[int] = None
+ pad_to_multiple_of: Optional[int] = None
+ label_pad_token_id: int = -100
+ return_tensors: str = "pt"
+
+ def __call__(self, features, return_tensors=None):
+ if return_tensors is None:
+ return_tensors = self.return_tensors
+ labels = [feature["labels"] for feature in features] if "labels" in features[0].keys() else None
+ # We have to pad the labels before calling `tokenizer.pad` as this method won't pad them and needs them of the
+ # same length to return tensors.
+ if labels is not None:
+ max_label_length = max(len(l) for l in labels)
+ if self.pad_to_multiple_of is not None:
+ max_label_length = (
+ (max_label_length + self.pad_to_multiple_of - 1)
+ // self.pad_to_multiple_of
+ * self.pad_to_multiple_of
+ )
+
+ padding_side = self.tokenizer.padding_side
+ for feature in features:
+ remainder = [self.label_pad_token_id] * (max_label_length - len(feature["labels"]))
+ if isinstance(feature["labels"], list):
+ feature["labels"] = (
+ feature["labels"] + remainder if padding_side == "right" else remainder + feature["labels"]
+ )
+ elif padding_side == "right":
+ feature["labels"] = np.concatenate([feature["labels"], remainder]).astype(np.int64)
+ else:
+ feature["labels"] = np.concatenate([remainder, feature["labels"]]).astype(np.int64)
+
+ features = pad_without_fast_tokenizer_warning(
+ self.tokenizer,
+ features,
+ padding=self.padding,
+ max_length=self.max_length,
+ pad_to_multiple_of=self.pad_to_multiple_of,
+ return_tensors=return_tensors,
+ )
+
+ # prepare decoder_input_ids
+ if (
+ labels is not None
+ and self.model is not None
+ and hasattr(self.model, "prepare_decoder_input_ids_from_labels")
+ ):
+ decoder_input_ids = self.model.prepare_decoder_input_ids_from_labels(labels=features["labels"])
+ features["decoder_input_ids"] = decoder_input_ids
+
+ return features
+
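+# Minimal usage sketch (comments only; toy ids, with "t5-small" as an illustrative checkpoint).
+# Labels are padded to the longest label sequence with -100 *before* `tokenizer.pad` is called, since
+# `pad` itself only handles the encoder inputs; passing `model=` additionally prepares
+# `decoder_input_ids` from the padded labels.
+#
+# >>> from transformers import AutoTokenizer
+# >>> tokenizer = AutoTokenizer.from_pretrained("t5-small")
+# >>> collator = DataCollatorForSeq2Seq(tokenizer)
+# >>> features = [
+# ...     {"input_ids": [100, 200, 1], "labels": [5, 6, 1]},
+# ...     {"input_ids": [100, 1], "labels": [5, 6, 7, 8, 1]},
+# ... ]
+# >>> collator(features)["labels"][0].tolist()
+# [5, 6, 1, -100, -100]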
+
+@dataclass
+class DataCollatorForLanguageModeling(DataCollatorMixin):
+ """
+ Data collator used for language modeling. Inputs are dynamically padded to the maximum length of a batch if they
+ are not all of the same length.
+
+ Args:
+ tokenizer ([`PreTrainedTokenizer`] or [`PreTrainedTokenizerFast`]):
+ The tokenizer used for encoding the data.
+ mlm (`bool`, *optional*, defaults to `True`):
+ Whether or not to use masked language modeling. If set to `False`, the labels are the same as the inputs
+ with the padding tokens ignored (by setting them to -100). Otherwise, the labels are -100 for non-masked
+ tokens and the value to predict for the masked token.
+ mlm_probability (`float`, *optional*, defaults to 0.15):
+ The probability with which to (randomly) mask tokens in the input, when `mlm` is set to `True`.
+ pad_to_multiple_of (`int`, *optional*):
+            If set, will pad the sequence to a multiple of the provided value.
+ return_tensors (`str`):
+ The type of Tensor to return. Allowable values are "np", "pt" and "tf".
+
+ For best performance, this data collator should be used with a dataset having items that are dictionaries or
+ BatchEncoding, with the `"special_tokens_mask"` key, as returned by a [`PreTrainedTokenizer`] or a
+ [`PreTrainedTokenizerFast`] with the argument `return_special_tokens_mask=True`.
+
+ """
+
+ tokenizer: PreTrainedTokenizerBase
+ mlm: bool = True
+ mlm_probability: float = 0.15
+ pad_to_multiple_of: Optional[int] = None
+ tf_experimental_compile: bool = False
+ return_tensors: str = "pt"
+
+ def __post_init__(self):
+ if self.mlm and self.tokenizer.mask_token is None:
+ raise ValueError(
+ "This tokenizer does not have a mask token which is necessary for masked language modeling. "
+ "You should pass `mlm=False` to train on causal language modeling instead."
+ )
+ if self.tf_experimental_compile:
+ import tensorflow as tf
+
+ self.tf_mask_tokens = tf.function(self.tf_mask_tokens, jit_compile=True)
+
+ @staticmethod
+ def tf_bernoulli(shape, probability):
+ import tensorflow as tf
+
+ prob_matrix = tf.fill(shape, probability)
+ return tf.cast(prob_matrix - tf.random.uniform(shape, 0, 1) >= 0, tf.bool)
+
+ def tf_mask_tokens(
+ self, inputs: Any, vocab_size, mask_token_id, special_tokens_mask: Optional[Any] = None
+ ) -> Tuple[Any, Any]:
+ """
+ Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original.
+ """
+ import tensorflow as tf
+
+ mask_token_id = tf.cast(mask_token_id, inputs.dtype)
+
+ input_shape = tf.shape(inputs)
+ # 1 for a special token, 0 for a normal token in the special tokens mask
+ # We sample a few tokens in each sequence for MLM training (with probability `self.mlm_probability`)
+ masked_indices = self.tf_bernoulli(input_shape, self.mlm_probability) & ~special_tokens_mask
+ # Replace unmasked indices with -100 in the labels since we only compute loss on masked tokens
+ labels = tf.where(masked_indices, inputs, -100)
+
+ # 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])
+ indices_replaced = self.tf_bernoulli(input_shape, 0.8) & masked_indices
+
+ inputs = tf.where(indices_replaced, mask_token_id, inputs)
+
+ # 10% of the time, we replace masked input tokens with random word
+ indices_random = self.tf_bernoulli(input_shape, 0.1) & masked_indices & ~indices_replaced
+ random_words = tf.random.uniform(input_shape, maxval=vocab_size, dtype=inputs.dtype)
+
+ inputs = tf.where(indices_random, random_words, inputs)
+
+ # The rest of the time (10% of the time) we keep the masked input tokens unchanged
+ return inputs, labels
+
+ def tf_call(self, examples: List[Union[List[int], Any, Dict[str, Any]]]) -> Dict[str, Any]:
+ import tensorflow as tf
+
+ # Handle dict or lists with proper padding and conversion to tensor.
+ if isinstance(examples[0], Mapping):
+ batch = pad_without_fast_tokenizer_warning(
+ self.tokenizer, examples, return_tensors="tf", pad_to_multiple_of=self.pad_to_multiple_of
+ )
+ else:
+ batch = {
+ "input_ids": _tf_collate_batch(examples, self.tokenizer, pad_to_multiple_of=self.pad_to_multiple_of)
+ }
+
+ # If special token mask has been preprocessed, pop it from the dict.
+ special_tokens_mask = batch.pop("special_tokens_mask", None)
+ if self.mlm:
+ if special_tokens_mask is None:
+ special_tokens_mask = [
+ self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True)
+ for val in batch["input_ids"].numpy().tolist()
+ ]
+ # Cannot directly create as bool
+ special_tokens_mask = tf.cast(tf.convert_to_tensor(special_tokens_mask, dtype=tf.int64), tf.bool)
+ else:
+ special_tokens_mask = tf.cast(special_tokens_mask, tf.bool)
+ batch["input_ids"], batch["labels"] = self.tf_mask_tokens(
+ tf.cast(batch["input_ids"], tf.int64),
+ special_tokens_mask=special_tokens_mask,
+ mask_token_id=self.tokenizer.mask_token_id,
+ vocab_size=len(self.tokenizer),
+ )
+ else:
+ labels = batch["input_ids"]
+ if self.tokenizer.pad_token_id is not None:
+ # Replace self.tokenizer.pad_token_id with -100
+ labels = tf.where(labels == self.tokenizer.pad_token_id, -100, labels)
+ else:
+ labels = tf.identity(labels) # Makes a copy, just in case
+ batch["labels"] = labels
+ return batch
+
+ def torch_call(self, examples: List[Union[List[int], Any, Dict[str, Any]]]) -> Dict[str, Any]:
+ # Handle dict or lists with proper padding and conversion to tensor.
+ if isinstance(examples[0], Mapping):
+ batch = pad_without_fast_tokenizer_warning(
+ self.tokenizer, examples, return_tensors="pt", pad_to_multiple_of=self.pad_to_multiple_of
+ )
+ else:
+ batch = {
+ "input_ids": _torch_collate_batch(examples, self.tokenizer, pad_to_multiple_of=self.pad_to_multiple_of)
+ }
+
+ # If special token mask has been preprocessed, pop it from the dict.
+ special_tokens_mask = batch.pop("special_tokens_mask", None)
+ if self.mlm:
+ batch["input_ids"], batch["labels"] = self.torch_mask_tokens(
+ batch["input_ids"], special_tokens_mask=special_tokens_mask
+ )
+ else:
+ labels = batch["input_ids"].clone()
+ if self.tokenizer.pad_token_id is not None:
+ labels[labels == self.tokenizer.pad_token_id] = -100
+ batch["labels"] = labels
+ return batch
+
+ def torch_mask_tokens(self, inputs: Any, special_tokens_mask: Optional[Any] = None) -> Tuple[Any, Any]:
+ """
+ Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original.
+ """
+ import torch
+
+ labels = inputs.clone()
+ # We sample a few tokens in each sequence for MLM training (with probability `self.mlm_probability`)
+ probability_matrix = torch.full(labels.shape, self.mlm_probability)
+ if special_tokens_mask is None:
+ special_tokens_mask = [
+ self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()
+ ]
+ special_tokens_mask = torch.tensor(special_tokens_mask, dtype=torch.bool)
+ else:
+ special_tokens_mask = special_tokens_mask.bool()
+
+ probability_matrix.masked_fill_(special_tokens_mask, value=0.0)
+ masked_indices = torch.bernoulli(probability_matrix).bool()
+ labels[~masked_indices] = -100 # We only compute loss on masked tokens
+
+ # 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])
+ indices_replaced = torch.bernoulli(torch.full(labels.shape, 0.8)).bool() & masked_indices
+ inputs[indices_replaced] = self.tokenizer.convert_tokens_to_ids(self.tokenizer.mask_token)
+
+ # 10% of the time, we replace masked input tokens with random word
+ indices_random = torch.bernoulli(torch.full(labels.shape, 0.5)).bool() & masked_indices & ~indices_replaced
+ random_words = torch.randint(len(self.tokenizer), labels.shape, dtype=torch.long)
+ inputs[indices_random] = random_words[indices_random]
+
+ # The rest of the time (10% of the time) we keep the masked input tokens unchanged
+ return inputs, labels
+
+ def numpy_call(self, examples: List[Union[List[int], Any, Dict[str, Any]]]) -> Dict[str, Any]:
+ # Handle dict or lists with proper padding and conversion to tensor.
+ if isinstance(examples[0], Mapping):
+ batch = pad_without_fast_tokenizer_warning(
+ self.tokenizer, examples, return_tensors="np", pad_to_multiple_of=self.pad_to_multiple_of
+ )
+ else:
+ batch = {
+ "input_ids": _numpy_collate_batch(examples, self.tokenizer, pad_to_multiple_of=self.pad_to_multiple_of)
+ }
+
+ # If special token mask has been preprocessed, pop it from the dict.
+ special_tokens_mask = batch.pop("special_tokens_mask", None)
+ if self.mlm:
+ batch["input_ids"], batch["labels"] = self.numpy_mask_tokens(
+ batch["input_ids"], special_tokens_mask=special_tokens_mask
+ )
+ else:
+ labels = np.copy(batch["input_ids"])
+ if self.tokenizer.pad_token_id is not None:
+ labels[labels == self.tokenizer.pad_token_id] = -100
+ batch["labels"] = labels
+ return batch
+
+ def numpy_mask_tokens(self, inputs: Any, special_tokens_mask: Optional[Any] = None) -> Tuple[Any, Any]:
+ """
+ Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original.
+ """
+ labels = np.copy(inputs)
+ # We sample a few tokens in each sequence for MLM training (with probability `self.mlm_probability`)
+ probability_matrix = np.full(labels.shape, self.mlm_probability)
+ if special_tokens_mask is None:
+ special_tokens_mask = [
+ self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()
+ ]
+ special_tokens_mask = np.array(special_tokens_mask, dtype=bool)
+ else:
+ special_tokens_mask = special_tokens_mask.astype(bool)
+
+ probability_matrix[special_tokens_mask] = 0
+ # Numpy doesn't have bernoulli, so we use a binomial with 1 trial
+ masked_indices = np.random.binomial(1, probability_matrix, size=probability_matrix.shape).astype(bool)
+ labels[~masked_indices] = -100 # We only compute loss on masked tokens
+
+ # 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])
+ indices_replaced = np.random.binomial(1, 0.8, size=labels.shape).astype(bool) & masked_indices
+ inputs[indices_replaced] = self.tokenizer.mask_token_id
+
+ # 10% of the time, we replace masked input tokens with random word
+ # indices_random = torch.bernoulli(torch.full(labels.shape, 0.5)).bool() & masked_indices & ~indices_replaced
+ indices_random = (
+ np.random.binomial(1, 0.5, size=labels.shape).astype(bool) & masked_indices & ~indices_replaced
+ )
+ random_words = np.random.randint(
+ low=0, high=len(self.tokenizer), size=np.count_nonzero(indices_random), dtype=np.int64
+ )
+ inputs[indices_random] = random_words
+
+ # The rest of the time (10% of the time) we keep the masked input tokens unchanged
+ return inputs, labels
+
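+# Minimal usage sketch (comments only; assumes a BERT-style checkpoint for illustration). With
+# `mlm=True`, roughly `mlm_probability` of the non-special tokens are selected; the output keys and
+# shapes are deterministic even though the masked positions are random.
+#
+# >>> from transformers import AutoTokenizer
+# >>> tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
+# >>> collator = DataCollatorForLanguageModeling(tokenizer, mlm=True, mlm_probability=0.15)
+# >>> features = [tokenizer("the quick brown fox"), tokenizer("jumps over the lazy dog")]
+# >>> batch = collator(features)
+# >>> batch["input_ids"].shape == batch["labels"].shape
+# True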
+
+@dataclass
+class DataCollatorForWholeWordMask(DataCollatorForLanguageModeling):
+ """
+ Data collator used for language modeling that masks entire words.
+
+ - collates batches of tensors, honoring their tokenizer's pad_token
+ - preprocesses batches for masked language modeling
+
+ This collator relies on details of the implementation of subword tokenization by [`BertTokenizer`], specifically
+ that subword tokens are prefixed with *##*. For tokenizers that do not adhere to this scheme, this collator will
+    produce an output that is roughly equivalent to [`DataCollatorForLanguageModeling`].
+
+ """
+
+ def torch_call(self, examples: List[Union[List[int], Any, Dict[str, Any]]]) -> Dict[str, Any]:
+ if isinstance(examples[0], Mapping):
+ input_ids = [e["input_ids"] for e in examples]
+ else:
+ input_ids = examples
+ examples = [{"input_ids": e} for e in examples]
+
+ batch_input = _torch_collate_batch(input_ids, self.tokenizer, pad_to_multiple_of=self.pad_to_multiple_of)
+
+ mask_labels = []
+ for e in examples:
+ ref_tokens = []
+ for id in tolist(e["input_ids"]):
+ token = self.tokenizer._convert_id_to_token(id)
+ ref_tokens.append(token)
+
+            # For Chinese tokens, we need extra info to mark sub-words, e.g. [喜,欢] -> [喜,##欢]
+ if "chinese_ref" in e:
+ ref_pos = tolist(e["chinese_ref"])
+ len_seq = len(e["input_ids"])
+ for i in range(len_seq):
+ if i in ref_pos:
+ ref_tokens[i] = "##" + ref_tokens[i]
+ mask_labels.append(self._whole_word_mask(ref_tokens))
+ batch_mask = _torch_collate_batch(mask_labels, self.tokenizer, pad_to_multiple_of=self.pad_to_multiple_of)
+ inputs, labels = self.torch_mask_tokens(batch_input, batch_mask)
+ return {"input_ids": inputs, "labels": labels}
+
+ def tf_call(self, examples: List[Union[List[int], Any, Dict[str, Any]]]) -> Dict[str, Any]:
+ import tensorflow as tf
+
+ if isinstance(examples[0], Mapping):
+ input_ids = [e["input_ids"] for e in examples]
+ else:
+ input_ids = examples
+ examples = [{"input_ids": e} for e in examples]
+
+ batch_input = _tf_collate_batch(input_ids, self.tokenizer, pad_to_multiple_of=self.pad_to_multiple_of)
+
+ mask_labels = []
+ for e in examples:
+ ref_tokens = []
+ for id in tolist(e["input_ids"]):
+ token = self.tokenizer._convert_id_to_token(id)
+ ref_tokens.append(token)
+
+            # For Chinese tokens, we need extra info to mark sub-words, e.g. [喜,欢] -> [喜,##欢]
+ if "chinese_ref" in e:
+ ref_pos = tolist(e["chinese_ref"])
+ len_seq = len(e["input_ids"])
+ for i in range(len_seq):
+ if i in ref_pos:
+ ref_tokens[i] = "##" + ref_tokens[i]
+ mask_labels.append(self._whole_word_mask(ref_tokens))
+ batch_mask = _tf_collate_batch(mask_labels, self.tokenizer, pad_to_multiple_of=self.pad_to_multiple_of)
+ inputs, labels = self.tf_mask_tokens(tf.cast(batch_input, tf.int64), batch_mask)
+ return {"input_ids": inputs, "labels": labels}
+
+ def numpy_call(self, examples: List[Union[List[int], Any, Dict[str, Any]]]) -> Dict[str, Any]:
+ if isinstance(examples[0], Mapping):
+ input_ids = [e["input_ids"] for e in examples]
+ else:
+ input_ids = examples
+ examples = [{"input_ids": e} for e in examples]
+
+ batch_input = _numpy_collate_batch(input_ids, self.tokenizer, pad_to_multiple_of=self.pad_to_multiple_of)
+
+ mask_labels = []
+ for e in examples:
+ ref_tokens = []
+ for id in tolist(e["input_ids"]):
+ token = self.tokenizer._convert_id_to_token(id)
+ ref_tokens.append(token)
+
+            # For Chinese tokens, we need extra info to mark sub-words, e.g. [喜,欢] -> [喜,##欢]
+ if "chinese_ref" in e:
+ ref_pos = tolist(e["chinese_ref"])
+ len_seq = len(e["input_ids"])
+ for i in range(len_seq):
+ if i in ref_pos:
+ ref_tokens[i] = "##" + ref_tokens[i]
+ mask_labels.append(self._whole_word_mask(ref_tokens))
+ batch_mask = _numpy_collate_batch(mask_labels, self.tokenizer, pad_to_multiple_of=self.pad_to_multiple_of)
+ inputs, labels = self.numpy_mask_tokens(batch_input, batch_mask)
+ return {"input_ids": inputs, "labels": labels}
+
+ def _whole_word_mask(self, input_tokens: List[str], max_predictions=512):
+ """
+ Get 0/1 labels for masked tokens with whole word mask proxy
+ """
+ if not isinstance(self.tokenizer, (BertTokenizer, BertTokenizerFast)):
+ warnings.warn(
+ "DataCollatorForWholeWordMask is only suitable for BertTokenizer-like tokenizers. "
+ "Please refer to the documentation for more information."
+ )
+
+ cand_indexes = []
+ for i, token in enumerate(input_tokens):
+ if token == "[CLS]" or token == "[SEP]":
+ continue
+
+ if len(cand_indexes) >= 1 and token.startswith("##"):
+ cand_indexes[-1].append(i)
+ else:
+ cand_indexes.append([i])
+
+ random.shuffle(cand_indexes)
+ num_to_predict = min(max_predictions, max(1, int(round(len(input_tokens) * self.mlm_probability))))
+ masked_lms = []
+ covered_indexes = set()
+ for index_set in cand_indexes:
+ if len(masked_lms) >= num_to_predict:
+ break
+ # If adding a whole-word mask would exceed the maximum number of
+ # predictions, then just skip this candidate.
+ if len(masked_lms) + len(index_set) > num_to_predict:
+ continue
+ is_any_index_covered = False
+ for index in index_set:
+ if index in covered_indexes:
+ is_any_index_covered = True
+ break
+ if is_any_index_covered:
+ continue
+ for index in index_set:
+ covered_indexes.add(index)
+ masked_lms.append(index)
+
+ if len(covered_indexes) != len(masked_lms):
+ raise ValueError("Length of covered_indexes is not equal to length of masked_lms.")
+ mask_labels = [1 if i in covered_indexes else 0 for i in range(len(input_tokens))]
+ return mask_labels
+
+ def torch_mask_tokens(self, inputs: Any, mask_labels: Any) -> Tuple[Any, Any]:
+ """
+        Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original. Setting
+        'mask_labels' means we use whole word masking (wwm); we directly mask indices according to its reference.
+ """
+ import torch
+
+ if self.tokenizer.mask_token is None:
+ raise ValueError(
+ "This tokenizer does not have a mask token which is necessary for masked language modeling. Remove the"
+ " --mlm flag if you want to use this tokenizer."
+ )
+ labels = inputs.clone()
+        # We sample a few tokens in each sequence for masked-LM training (with probability args.mlm_probability, which defaults to 0.15 in BERT/RoBERTa)
+
+ probability_matrix = mask_labels
+
+ special_tokens_mask = [
+ self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()
+ ]
+ probability_matrix.masked_fill_(torch.tensor(special_tokens_mask, dtype=torch.bool), value=0.0)
+ if self.tokenizer._pad_token is not None:
+ padding_mask = labels.eq(self.tokenizer.pad_token_id)
+ probability_matrix.masked_fill_(padding_mask, value=0.0)
+
+ masked_indices = probability_matrix.bool()
+ labels[~masked_indices] = -100 # We only compute loss on masked tokens
+
+ # 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])
+ indices_replaced = torch.bernoulli(torch.full(labels.shape, 0.8)).bool() & masked_indices
+ inputs[indices_replaced] = self.tokenizer.convert_tokens_to_ids(self.tokenizer.mask_token)
+
+ # 10% of the time, we replace masked input tokens with random word
+ indices_random = torch.bernoulli(torch.full(labels.shape, 0.5)).bool() & masked_indices & ~indices_replaced
+ random_words = torch.randint(len(self.tokenizer), labels.shape, dtype=torch.long)
+ inputs[indices_random] = random_words[indices_random]
+
+ # The rest of the time (10% of the time) we keep the masked input tokens unchanged
+ return inputs, labels
+
+ def tf_mask_tokens(self, inputs: Any, mask_labels: Any) -> Tuple[Any, Any]:
+ """
+        Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original. Setting
+        'mask_labels' means we use whole word masking (wwm); we directly mask indices according to its reference.
+ """
+ import tensorflow as tf
+
+ input_shape = tf.shape(inputs)
+ if self.tokenizer.mask_token is None:
+ raise ValueError(
+ "This tokenizer does not have a mask token which is necessary for masked language modeling. Remove the"
+ " --mlm flag if you want to use this tokenizer."
+ )
+ labels = tf.identity(inputs)
+        # We sample a few tokens in each sequence for masked-LM training (with probability args.mlm_probability, which defaults to 0.15 in BERT/RoBERTa)
+
+ masked_indices = tf.cast(mask_labels, tf.bool)
+
+ special_tokens_mask = [
+ self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels
+ ]
+ masked_indices = masked_indices & ~tf.cast(special_tokens_mask, dtype=tf.bool)
+ if self.tokenizer._pad_token is not None:
+ padding_mask = inputs == self.tokenizer.pad_token_id
+ masked_indices = masked_indices & ~padding_mask
+
+ # Replace unmasked indices with -100 in the labels since we only compute loss on masked tokens
+ labels = tf.where(masked_indices, inputs, -100)
+
+ # 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])
+ indices_replaced = self.tf_bernoulli(input_shape, 0.8) & masked_indices
+
+ inputs = tf.where(indices_replaced, self.tokenizer.mask_token_id, inputs)
+
+ # 10% of the time, we replace masked input tokens with random word
+ indices_random = self.tf_bernoulli(input_shape, 0.5) & masked_indices & ~indices_replaced
+ random_words = tf.random.uniform(input_shape, maxval=len(self.tokenizer), dtype=tf.int64)
+ inputs = tf.where(indices_random, random_words, inputs)
+
+ # The rest of the time (10% of the time) we keep the masked input tokens unchanged
+ return inputs, labels
+
+ def numpy_mask_tokens(self, inputs: Any, mask_labels: Any) -> Tuple[Any, Any]:
+ """
+        Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original. Setting
+        'mask_labels' means we use whole word masking (wwm); we directly mask indices according to its reference.
+ """
+ if self.tokenizer.mask_token is None:
+ raise ValueError(
+ "This tokenizer does not have a mask token which is necessary for masked language modeling. Remove the"
+ " --mlm flag if you want to use this tokenizer."
+ )
+ labels = np.copy(inputs)
+        # We sample a few tokens in each sequence for masked-LM training (with probability args.mlm_probability, which defaults to 0.15 in BERT/RoBERTa)
+
+ masked_indices = mask_labels.astype(bool)
+
+ special_tokens_mask = [
+ self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()
+ ]
+ masked_indices[np.array(special_tokens_mask, dtype=bool)] = 0
+ if self.tokenizer._pad_token is not None:
+ padding_mask = labels == self.tokenizer.pad_token_id
+ masked_indices[padding_mask] = 0
+
+ labels[~masked_indices] = -100 # We only compute loss on masked tokens
+
+ # 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])
+ indices_replaced = np.random.binomial(1, 0.8, size=labels.shape).astype(bool) & masked_indices
+ inputs[indices_replaced] = self.tokenizer.convert_tokens_to_ids(self.tokenizer.mask_token)
+
+ # 10% of the time, we replace masked input tokens with random word
+ # indices_random = torch.bernoulli(torch.full(labels.shape, 0.5)).bool() & masked_indices & ~indices_replaced
+ indices_random = (
+ np.random.binomial(1, 0.5, size=labels.shape).astype(bool) & masked_indices & ~indices_replaced
+ )
+ random_words = np.random.randint(low=0, high=len(self.tokenizer), size=labels.shape, dtype=np.int64)
+ inputs[indices_random] = random_words[indices_random]
+
+ # The rest of the time (10% of the time) we keep the masked input tokens unchanged
+ return inputs, labels
+
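+# Minimal usage sketch (comments only; assumes a WordPiece tokenizer such as BERT's, since
+# `_whole_word_mask` groups a token with the "##" continuations that follow it). With a non-WordPiece
+# tokenizer the behaviour degrades to roughly standard token-level masking, as noted in the class
+# docstring.
+#
+# >>> from transformers import BertTokenizerFast
+# >>> tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased")
+# >>> collator = DataCollatorForWholeWordMask(tokenizer, mlm_probability=0.3)
+# >>> batch = collator([tokenizer("tokenization splits uncommon words")])
+# >>> set(batch.keys()) == {"input_ids", "labels"}
+# True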
+
+@dataclass
+class DataCollatorForSOP(DataCollatorForLanguageModeling):
+ """
+ Data collator used for sentence order prediction task.
+
+ - collates batches of tensors, honoring their tokenizer's pad_token
+ - preprocesses batches for both masked language modeling and sentence order prediction
+ """
+
+ def __init__(self, *args, **kwargs):
+ warnings.warn(
+ "DataCollatorForSOP is deprecated and will be removed in a future version, you can now use "
+ "DataCollatorForLanguageModeling instead.",
+ FutureWarning,
+ )
+
+ def __call__(self, examples: List[Dict[str, Any]]) -> Dict[str, Any]:
+ import torch
+ from torch.nn.utils.rnn import pad_sequence
+
+ input_ids = [example["input_ids"] for example in examples]
+ input_ids = _torch_collate_batch(input_ids, self.tokenizer)
+ input_ids, labels, attention_mask = self.mask_tokens(input_ids)
+
+ token_type_ids = [example["token_type_ids"] for example in examples]
+        # the size of segment_ids varies because of randomness, so pad zeros at the end, as in the original implementation
+ token_type_ids = pad_sequence(token_type_ids, batch_first=True, padding_value=self.tokenizer.pad_token_id)
+
+ sop_label_list = [example["sentence_order_label"] for example in examples]
+ sentence_order_label = torch.stack(sop_label_list)
+
+ return {
+ "input_ids": input_ids,
+ "labels": labels,
+ "attention_mask": attention_mask,
+ "token_type_ids": token_type_ids,
+ "sentence_order_label": sentence_order_label,
+ }
+
+ def mask_tokens(self, inputs: Any) -> Tuple[Any, Any, Any]:
+ """
+ Prepare masked tokens inputs/labels/attention_mask for masked language modeling: 80% MASK, 10% random, 10%
+ original. N-gram not applied yet.
+ """
+ import torch
+
+ if self.tokenizer.mask_token is None:
+ raise ValueError(
+ "This tokenizer does not have a mask token which is necessary for masked language modeling. Remove the"
+ " --mlm flag if you want to use this tokenizer."
+ )
+
+ labels = inputs.clone()
+        # We sample a few tokens in each sequence for masked-LM training (with probability args.mlm_probability, which defaults to 0.15 in BERT/RoBERTa)
+ probability_matrix = torch.full(labels.shape, self.mlm_probability)
+ special_tokens_mask = [
+ self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()
+ ]
+ probability_matrix.masked_fill_(torch.tensor(special_tokens_mask, dtype=torch.bool), value=0.0)
+ if self.tokenizer._pad_token is not None:
+ padding_mask = labels.eq(self.tokenizer.pad_token_id)
+ probability_matrix.masked_fill_(padding_mask, value=0.0)
+ masked_indices = torch.bernoulli(probability_matrix).bool()
+        # a probability of `1` means masked; however, in the ALBERT model an attention-mask value of `0` means masked, so invert the value
+ attention_mask = (~masked_indices).float()
+ if self.tokenizer._pad_token is not None:
+ attention_padding_mask = labels.eq(self.tokenizer.pad_token_id)
+ attention_mask.masked_fill_(attention_padding_mask, value=1.0)
+ labels[~masked_indices] = -100 # We only compute loss on masked tokens, -100 is default for CE compute
+
+ # 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])
+ indices_replaced = torch.bernoulli(torch.full(labels.shape, 0.8)).bool() & masked_indices
+ inputs[indices_replaced] = self.tokenizer.convert_tokens_to_ids(self.tokenizer.mask_token)
+
+ # 10% of the time, we replace masked input tokens with random word
+ indices_random = torch.bernoulli(torch.full(labels.shape, 0.5)).bool() & masked_indices & ~indices_replaced
+ random_words = torch.randint(len(self.tokenizer), labels.shape, dtype=torch.long)
+ inputs[indices_random] = random_words[indices_random]
+
+ # The rest of the time (10% of the time) we keep the masked input tokens unchanged
+ return inputs, labels, attention_mask
+
+
+@dataclass
+class DataCollatorForPermutationLanguageModeling(DataCollatorMixin):
+ """
+ Data collator used for permutation language modeling.
+
+ - collates batches of tensors, honoring their tokenizer's pad_token
+ - preprocesses batches for permutation language modeling with procedures specific to XLNet
+ """
+
+ tokenizer: PreTrainedTokenizerBase
+ plm_probability: float = 1 / 6
+ max_span_length: int = 5 # maximum length of a span of masked tokens
+ return_tensors: str = "pt"
+
+ def torch_call(self, examples: List[Union[List[int], Any, Dict[str, Any]]]) -> Dict[str, Any]:
+ if isinstance(examples[0], Mapping):
+ examples = [e["input_ids"] for e in examples]
+ batch = _torch_collate_batch(examples, self.tokenizer)
+ inputs, perm_mask, target_mapping, labels = self.torch_mask_tokens(batch)
+ return {"input_ids": inputs, "perm_mask": perm_mask, "target_mapping": target_mapping, "labels": labels}
+
+ def tf_call(self, examples: List[Union[List[int], Any, Dict[str, Any]]]) -> Dict[str, Any]:
+ if isinstance(examples[0], Mapping):
+ examples = [e["input_ids"] for e in examples]
+ batch = _tf_collate_batch(examples, self.tokenizer)
+ inputs, perm_mask, target_mapping, labels = self.tf_mask_tokens(batch)
+ return {"input_ids": inputs, "perm_mask": perm_mask, "target_mapping": target_mapping, "labels": labels}
+
+ def numpy_call(self, examples: List[Union[List[int], Any, Dict[str, Any]]]) -> Dict[str, Any]:
+ if isinstance(examples[0], Mapping):
+ examples = [e["input_ids"] for e in examples]
+ batch = _numpy_collate_batch(examples, self.tokenizer)
+ inputs, perm_mask, target_mapping, labels = self.numpy_mask_tokens(batch)
+ return {"input_ids": inputs, "perm_mask": perm_mask, "target_mapping": target_mapping, "labels": labels}
+
+ def torch_mask_tokens(self, inputs: Any) -> Tuple[Any, Any, Any, Any]:
+ """
+ The masked tokens to be predicted for a particular sequence are determined by the following algorithm:
+
+ 0. Start from the beginning of the sequence by setting `cur_len = 0` (number of tokens processed so far).
+ 1. Sample a `span_length` from the interval `[1, max_span_length]` (length of span of tokens to be masked)
+ 2. Reserve a context of length `context_length = span_length / plm_probability` to surround span to be
+ masked
+ 3. Sample a starting point `start_index` from the interval `[cur_len, cur_len + context_length -
+ span_length]` and mask tokens `start_index:start_index + span_length`
+ 4. Set `cur_len = cur_len + context_length`. If `cur_len < max_len` (i.e. there are tokens remaining in the
+ sequence to be processed), repeat from Step 1.
+ """
+ import torch
+
+ if self.tokenizer.mask_token is None:
+ raise ValueError(
+ "This tokenizer does not have a mask token which is necessary for permutation language modeling."
+ " Please add a mask token if you want to use this tokenizer."
+ )
+
+ if inputs.size(1) % 2 != 0:
+ raise ValueError(
+ "This collator requires that sequence lengths be even to create a leakage-free perm_mask. Please see"
+ " relevant comments in source code for details."
+ )
+
+ labels = inputs.clone()
+ # Creating the mask and target_mapping tensors
+ masked_indices = torch.full(labels.shape, 0, dtype=torch.bool)
+ target_mapping = torch.zeros((labels.size(0), labels.size(1), labels.size(1)), dtype=torch.float32)
+
+ for i in range(labels.size(0)):
+ # Start from the beginning of the sequence by setting `cur_len = 0` (number of tokens processed so far).
+ cur_len = 0
+ max_len = labels.size(1)
+
+ while cur_len < max_len:
+ # Sample a `span_length` from the interval `[1, max_span_length]` (length of span of tokens to be masked)
+ span_length = torch.randint(1, self.max_span_length + 1, (1,)).item()
+ # Reserve a context of length `context_length = span_length / plm_probability` to surround the span to be masked
+ context_length = int(span_length / self.plm_probability)
+ # Sample a starting point `start_index` from the interval `[cur_len, cur_len + context_length - span_length]` and mask tokens `start_index:start_index + span_length`
+ start_index = cur_len + torch.randint(context_length - span_length + 1, (1,)).item()
+ masked_indices[i, start_index : start_index + span_length] = 1
+ # Set `cur_len = cur_len + context_length`
+ cur_len += context_length
+
+ # Since we're replacing non-masked tokens with -100 in the labels tensor instead of skipping them altogether,
+            # the i-th prediction corresponds to the i-th token.
+ target_mapping[i] = torch.eye(labels.size(1))
+
+ special_tokens_mask = torch.tensor(
+ [self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()],
+ dtype=torch.bool,
+ )
+ masked_indices.masked_fill_(special_tokens_mask, value=0.0)
+ if self.tokenizer._pad_token is not None:
+ padding_mask = labels.eq(self.tokenizer.pad_token_id)
+ masked_indices.masked_fill_(padding_mask, value=0.0)
+
+ # Mask indicating non-functional tokens, where functional tokens are [SEP], [CLS], padding, etc.
+ non_func_mask = ~(padding_mask | special_tokens_mask)
+
+ inputs[masked_indices] = self.tokenizer.mask_token_id
+ labels[~masked_indices] = -100 # We only compute loss on masked tokens
+
+ perm_mask = torch.zeros((labels.size(0), labels.size(1), labels.size(1)), dtype=torch.float32)
+
+ for i in range(labels.size(0)):
+ # Generate permutation indices i.e. sample a random factorisation order for the sequence. This will
+ # determine which tokens a given token can attend to (encoded in `perm_mask`).
+ # Note: Length of token sequence being permuted has to be less than or equal to reused sequence length
+ # (see documentation for `mems`), otherwise information may leak through due to reuse. In this implementation,
+ # we assume that reused length is half of sequence length and permutation length is equal to reused length.
+ # This requires that the sequence length be even.
+
+ # Create a linear factorisation order
+ perm_index = torch.arange(labels.size(1))
+ # Split this into two halves, assuming that half the sequence is reused each time
+ perm_index = perm_index.reshape((-1, labels.size(1) // 2)).transpose(0, 1)
+ # Permute the two halves such that they do not cross over
+ perm_index = perm_index[torch.randperm(labels.size(1) // 2)]
+ # Flatten this out into the desired permuted factorisation order
+ perm_index = torch.flatten(perm_index.transpose(0, 1))
+ # Set the permutation indices of non-masked (non-functional) tokens to the
+ # smallest index (-1) so that:
+ # (1) They can be seen by all other positions
+ # (2) They cannot see masked positions, so there won't be information leak
+ perm_index.masked_fill_(~masked_indices[i] & non_func_mask[i], -1)
+ # The logic for whether the i-th token can attend on the j-th token based on the factorisation order:
+ # 0 (can attend): If perm_index[i] > perm_index[j] or j is neither masked nor a functional token
+ # 1 (cannot attend): If perm_index[i] <= perm_index[j] and j is either masked or a functional token
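+            # For intuition (illustrative example, not from the original source): with a 4-token sequence,
+            # masked positions {0, 2} and a sampled perm_index = [1, -1, 3, -1], every position can attend
+            # to the unmasked tokens 1 and 3, token 2 (later in the factorisation order) can attend to
+            # token 0, token 0 cannot attend to token 2, and neither masked token can see its own content.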
+ perm_mask[i] = (
+ perm_index.reshape((labels.size(1), 1)) <= perm_index.reshape((1, labels.size(1)))
+ ) & masked_indices[i]
+
+ return inputs.long(), perm_mask, target_mapping, labels.long()
+
+ def tf_mask_tokens(self, inputs: Any) -> Tuple[Any, Any, Any, Any]:
+ """
+ The masked tokens to be predicted for a particular sequence are determined by the following algorithm:
+
+ 0. Start from the beginning of the sequence by setting `cur_len = 0` (number of tokens processed so far).
+ 1. Sample a `span_length` from the interval `[1, max_span_length]` (length of span of tokens to be masked)
+ 2. Reserve a context of length `context_length = span_length / plm_probability` to surround span to be
+ masked
+ 3. Sample a starting point `start_index` from the interval `[cur_len, cur_len + context_length -
+ span_length]` and mask tokens `start_index:start_index + span_length`
+ 4. Set `cur_len = cur_len + context_length`. If `cur_len < max_len` (i.e. there are tokens remaining in the
+ sequence to be processed), repeat from Step 1.
+ """
+ import tensorflow as tf
+
+ if self.tokenizer.mask_token is None:
+ raise ValueError(
+ "This tokenizer does not have a mask token which is necessary for permutation language modeling."
+ " Please add a mask token if you want to use this tokenizer."
+ )
+
+ if tf.shape(inputs)[1] % 2 != 0:
+ raise ValueError(
+ "This collator requires that sequence lengths be even to create a leakage-free perm_mask. Please see"
+ " relevant comments in source code for details."
+ )
+
+ labels = tf.identity(inputs)
+ # Creating the mask and target_mapping tensors
+ masked_indices = np.full(labels.shape.as_list(), 0, dtype=bool)
+ labels_shape = tf.shape(labels)
+ target_mapping = np.zeros((labels_shape[0], labels_shape[1], labels_shape[1]), dtype=np.float32)
+
+ for i in range(len(labels)):
+ # Start from the beginning of the sequence by setting `cur_len = 0` (number of tokens processed so far).
+ cur_len = 0
+ max_len = tf.shape(labels)[1]
+
+ while cur_len < max_len:
+ # Sample a `span_length` from the interval `[1, max_span_length]` (length of span of tokens to be masked)
+                span_length = randint(1, self.max_span_length)  # random.randint is inclusive of both endpoints
+ # Reserve a context of length `context_length = span_length / plm_probability` to surround the span to be masked
+ context_length = int(span_length / self.plm_probability)
+ # Sample a starting point `start_index` from the interval `[cur_len, cur_len + context_length - span_length]` and mask tokens `start_index:start_index + span_length`
+                start_index = cur_len + randint(0, context_length - span_length)  # random.randint is inclusive of both endpoints
+ masked_indices[i, start_index : start_index + span_length] = 1
+ # Set `cur_len = cur_len + context_length`
+ cur_len += context_length
+
+ # Since we're replacing non-masked tokens with -100 in the labels tensor instead of skipping them altogether,
+            # the i-th prediction corresponds to the i-th token.
+ target_mapping[i] = np.eye(labels_shape[1])
+ masked_indices = tf.cast(tf.convert_to_tensor(masked_indices), dtype=tf.bool)
+ target_mapping = tf.convert_to_tensor(target_mapping)
+ special_tokens_mask = tf.convert_to_tensor(
+ [
+ self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True)
+ for val in labels.numpy().tolist()
+ ],
+ )
+ special_tokens_mask = tf.cast(special_tokens_mask, dtype=tf.bool)
+ masked_indices = masked_indices & ~special_tokens_mask
+ if self.tokenizer._pad_token is not None:
+ padding_mask = labels == self.tokenizer.pad_token_id
+ masked_indices = masked_indices & ~padding_mask
+
+ # Mask indicating non-functional tokens, where functional tokens are [SEP], [CLS], padding, etc.
+ non_func_mask = ~(padding_mask | special_tokens_mask)
+
+ inputs = tf.where(masked_indices, self.tokenizer.mask_token_id, inputs)
+ labels = tf.where(masked_indices, labels, -100) # We only compute loss on masked tokens
+
+ perm_mask = []
+
+ for i in range(len(labels)):
+ # Generate permutation indices i.e. sample a random factorisation order for the sequence. This will
+ # determine which tokens a given token can attend to (encoded in `perm_mask`).
+ # Note: Length of token sequence being permuted has to be less than or equal to reused sequence length
+ # (see documentation for `mems`), otherwise information may leak through due to reuse. In this implementation,
+ # we assume that reused length is half of sequence length and permutation length is equal to reused length.
+ # This requires that the sequence length be even.
+
+ # Create a linear factorisation order
+ # tf.range is the equivalent of torch.arange
+ perm_index = tf.range(labels_shape[1])
+ # Split this into two halves, assuming that half the sequence is reused each time
+ perm_index = tf.transpose(tf.reshape(perm_index, (-1, labels_shape[1] // 2)))
+ # Permute the two halves such that they do not cross over
+ perm_index = tf.random.shuffle(perm_index) # Shuffles along the first dimension
+ # Flatten this out into the desired permuted factorisation order
+ perm_index = tf.reshape(tf.transpose(perm_index), (-1,))
+ # Set the permutation indices of non-masked (non-functional) tokens to the
+ # smallest index (-1) so that:
+ # (1) They can be seen by all other positions
+ # (2) They cannot see masked positions, so there won't be information leak
+ perm_index = tf.where(~masked_indices[i] & non_func_mask[i], -1, perm_index)
+ # The logic for whether the i-th token can attend on the j-th token based on the factorisation order:
+ # 0 (can attend): If perm_index[i] > perm_index[j] or j is neither masked nor a functional token
+ # 1 (cannot attend): If perm_index[i] <= perm_index[j] and j is either masked or a functional token
+ perm_mask.append(
+ (tf.reshape(perm_index, (labels_shape[1], 1)) <= tf.reshape(perm_index, (1, labels_shape[1])))
+ & masked_indices[i]
+ )
+ perm_mask = tf.stack(perm_mask, axis=0)
+
+ return tf.cast(inputs, tf.int64), tf.cast(perm_mask, tf.float32), target_mapping, tf.cast(labels, tf.int64)
+
+ def numpy_mask_tokens(self, inputs: Any) -> Tuple[Any, Any, Any, Any]:
+ """
+ The masked tokens to be predicted for a particular sequence are determined by the following algorithm:
+
+ 0. Start from the beginning of the sequence by setting `cur_len = 0` (number of tokens processed so far).
+ 1. Sample a `span_length` from the interval `[1, max_span_length]` (length of span of tokens to be masked)
+ 2. Reserve a context of length `context_length = span_length / plm_probability` to surround span to be
+ masked
+ 3. Sample a starting point `start_index` from the interval `[cur_len, cur_len + context_length -
+ span_length]` and mask tokens `start_index:start_index + span_length`
+ 4. Set `cur_len = cur_len + context_length`. If `cur_len < max_len` (i.e. there are tokens remaining in the
+ sequence to be processed), repeat from Step 1.
+ """
+ if self.tokenizer.mask_token is None:
+ raise ValueError(
+ "This tokenizer does not have a mask token which is necessary for permutation language modeling."
+ " Please add a mask token if you want to use this tokenizer."
+ )
+
+ if inputs.shape[1] % 2 != 0:
+ raise ValueError(
+ "This collator requires that sequence lengths be even to create a leakage-free perm_mask. Please see"
+ " relevant comments in source code for details."
+ )
+
+ labels = np.copy(inputs)
+ # Creating the mask and target_mapping tensors
+ masked_indices = np.full(labels.shape, 0, dtype=bool)
+ target_mapping = np.zeros((labels.shape[0], labels.shape[1], labels.shape[1]), dtype=np.float32)
+
+ for i in range(labels.shape[0]):
+ # Start from the beginning of the sequence by setting `cur_len = 0` (number of tokens processed so far).
+ cur_len = 0
+ max_len = labels.shape[1]
+
+ while cur_len < max_len:
+ # Sample a `span_length` from the interval `[1, max_span_length]` (length of span of tokens to be masked)
+                span_length = randint(1, self.max_span_length)  # random.randint is inclusive of both endpoints
+ # Reserve a context of length `context_length = span_length / plm_probability` to surround the span to be masked
+ context_length = int(span_length / self.plm_probability)
+ # Sample a starting point `start_index` from the interval `[cur_len, cur_len + context_length - span_length]` and mask tokens `start_index:start_index + span_length`
+                start_index = cur_len + randint(0, context_length - span_length)  # random.randint is inclusive of both endpoints
+ masked_indices[i, start_index : start_index + span_length] = 1
+ # Set `cur_len = cur_len + context_length`
+ cur_len += context_length
+
+ # Since we're replacing non-masked tokens with -100 in the labels tensor instead of skipping them altogether,
+            # the i-th prediction corresponds to the i-th token.
+ target_mapping[i] = np.eye(labels.shape[1])
+
+ special_tokens_mask = np.array(
+ [self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()],
+ dtype=bool,
+ )
+ masked_indices[special_tokens_mask] = 0
+ if self.tokenizer._pad_token is not None:
+ padding_mask = labels == self.tokenizer.pad_token_id
+ masked_indices[padding_mask] = 0.0
+
+ # Mask indicating non-functional tokens, where functional tokens are [SEP], [CLS], padding, etc.
+ non_func_mask = ~(padding_mask | special_tokens_mask)
+
+ inputs[masked_indices] = self.tokenizer.mask_token_id
+ labels[~masked_indices] = -100 # We only compute loss on masked tokens
+
+ perm_mask = np.zeros((labels.shape[0], labels.shape[1], labels.shape[1]), dtype=np.float32)
+
+ for i in range(labels.shape[0]):
+ # Generate permutation indices i.e. sample a random factorisation order for the sequence. This will
+ # determine which tokens a given token can attend to (encoded in `perm_mask`).
+ # Note: Length of token sequence being permuted has to be less than or equal to reused sequence length
+ # (see documentation for `mems`), otherwise information may leak through due to reuse. In this implementation,
+ # we assume that reused length is half of sequence length and permutation length is equal to reused length.
+ # This requires that the sequence length be even.
+
+ # Create a linear factorisation order
+ perm_index = np.arange(labels.shape[1])
+ # Split this into two halves, assuming that half the sequence is reused each time
+ perm_index = perm_index.reshape((-1, labels.shape[1] // 2)).T
+ # Permute the two halves such that they do not cross over
+ np.random.shuffle(perm_index)
+ # Flatten this out into the desired permuted factorisation order
+ perm_index = perm_index.T.flatten()
+ # Set the permutation indices of non-masked (non-functional) tokens to the
+ # smallest index (-1) so that:
+ # (1) They can be seen by all other positions
+ # (2) They cannot see masked positions, so there won't be information leak
+ perm_index[~masked_indices[i] & non_func_mask[i]] = -1
+ # The logic for whether the i-th token can attend on the j-th token based on the factorisation order:
+ # 0 (can attend): If perm_index[i] > perm_index[j] or j is neither masked nor a functional token
+ # 1 (cannot attend): If perm_index[i] <= perm_index[j] and j is either masked or a functional token
+ perm_mask[i] = (
+ perm_index.reshape((labels.shape[1], 1)) <= perm_index.reshape((1, labels.shape[1]))
+ ) & masked_indices[i]
+
+ return inputs.astype(np.int64), perm_mask, target_mapping, labels.astype(np.int64)
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/data/datasets/__pycache__/squad.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/data/datasets/__pycache__/squad.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b0f806d023093adc918c88e922cfc17e25cf8f3c
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/data/datasets/__pycache__/squad.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/data/datasets/glue.py b/llmeval-env/lib/python3.10/site-packages/transformers/data/datasets/glue.py
new file mode 100644
index 0000000000000000000000000000000000000000..72df3bece21925d15748d53bd82def67bfdd82bb
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/data/datasets/glue.py
@@ -0,0 +1,161 @@
+# Copyright 2020 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import time
+import warnings
+from dataclasses import dataclass, field
+from enum import Enum
+from typing import List, Optional, Union
+
+import torch
+from filelock import FileLock
+from torch.utils.data import Dataset
+
+from ...tokenization_utils_base import PreTrainedTokenizerBase
+from ...utils import logging
+from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
+from ..processors.utils import InputFeatures
+
+
+logger = logging.get_logger(__name__)
+
+
+@dataclass
+class GlueDataTrainingArguments:
+ """
+ Arguments pertaining to what data we are going to input our model for training and eval.
+
+ Using `HfArgumentParser` we can turn this class into argparse arguments to be able to specify them on the command
+ line.
+ """
+
+ task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys())})
+ data_dir: str = field(
+ metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
+ )
+ max_seq_length: int = field(
+ default=128,
+ metadata={
+ "help": (
+ "The maximum total input sequence length after tokenization. Sequences longer "
+ "than this will be truncated, sequences shorter will be padded."
+ )
+ },
+ )
+ overwrite_cache: bool = field(
+ default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
+ )
+
+ def __post_init__(self):
+ self.task_name = self.task_name.lower()
+
+
+class Split(Enum):
+ train = "train"
+ dev = "dev"
+ test = "test"
+
+
+class GlueDataset(Dataset):
+ """
+ This will be superseded by a framework-agnostic approach soon.
+ """
+
+ args: GlueDataTrainingArguments
+ output_mode: str
+ features: List[InputFeatures]
+
+ def __init__(
+ self,
+ args: GlueDataTrainingArguments,
+ tokenizer: PreTrainedTokenizerBase,
+ limit_length: Optional[int] = None,
+ mode: Union[str, Split] = Split.train,
+ cache_dir: Optional[str] = None,
+ ):
+ warnings.warn(
+ "This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets "
+ "library. You can have a look at this example script for pointers: "
+ "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py",
+ FutureWarning,
+ )
+ self.args = args
+ self.processor = glue_processors[args.task_name]()
+ self.output_mode = glue_output_modes[args.task_name]
+ if isinstance(mode, str):
+ try:
+ mode = Split[mode]
+ except KeyError:
+ raise KeyError("mode is not a valid split name")
+ # Load data features from cache or dataset file
+ cached_features_file = os.path.join(
+ cache_dir if cache_dir is not None else args.data_dir,
+ f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}",
+ )
+ label_list = self.processor.get_labels()
+ if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
+ "RobertaTokenizer",
+ "RobertaTokenizerFast",
+ "XLMRobertaTokenizer",
+ "BartTokenizer",
+ "BartTokenizerFast",
+ ):
+ # HACK(label indices are swapped in RoBERTa pretrained model)
+ label_list[1], label_list[2] = label_list[2], label_list[1]
+ self.label_list = label_list
+
+ # Make sure only the first process in distributed training processes the dataset,
+ # and the others will use the cache.
+ lock_path = cached_features_file + ".lock"
+ with FileLock(lock_path):
+ if os.path.exists(cached_features_file) and not args.overwrite_cache:
+ start = time.time()
+ self.features = torch.load(cached_features_file)
+ logger.info(
+ f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
+ )
+ else:
+ logger.info(f"Creating features from dataset file at {args.data_dir}")
+
+ if mode == Split.dev:
+ examples = self.processor.get_dev_examples(args.data_dir)
+ elif mode == Split.test:
+ examples = self.processor.get_test_examples(args.data_dir)
+ else:
+ examples = self.processor.get_train_examples(args.data_dir)
+ if limit_length is not None:
+ examples = examples[:limit_length]
+ self.features = glue_convert_examples_to_features(
+ examples,
+ tokenizer,
+ max_length=args.max_seq_length,
+ label_list=label_list,
+ output_mode=self.output_mode,
+ )
+ start = time.time()
+ torch.save(self.features, cached_features_file)
+ # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
+ logger.info(
+ f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
+ )
+
+ def __len__(self):
+ return len(self.features)
+
+ def __getitem__(self, i) -> InputFeatures:
+ return self.features[i]
+
+ def get_labels(self):
+ return self.label_list
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/data/datasets/language_modeling.py b/llmeval-env/lib/python3.10/site-packages/transformers/data/datasets/language_modeling.py
new file mode 100644
index 0000000000000000000000000000000000000000..6c23bf23cf14d4953a278dd3584093d0af084133
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/data/datasets/language_modeling.py
@@ -0,0 +1,530 @@
+# Copyright 2020 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json
+import os
+import pickle
+import random
+import time
+import warnings
+from typing import Dict, List, Optional
+
+import torch
+from filelock import FileLock
+from torch.utils.data import Dataset
+
+from ...tokenization_utils import PreTrainedTokenizer
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+
+DEPRECATION_WARNING = (
+ "This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets "
+ "library. You can have a look at this example script for pointers: {0}"
+)
+
+
+class TextDataset(Dataset):
+ """
+ This will be superseded by a framework-agnostic approach soon.
+ """
+
+ def __init__(
+ self,
+ tokenizer: PreTrainedTokenizer,
+ file_path: str,
+ block_size: int,
+ overwrite_cache=False,
+ cache_dir: Optional[str] = None,
+ ):
+ warnings.warn(
+ DEPRECATION_WARNING.format(
+ "https://github.com/huggingface/transformers/blob/main/examples/pytorch/language-modeling/run_mlm.py"
+ ),
+ FutureWarning,
+ )
+ if os.path.isfile(file_path) is False:
+ raise ValueError(f"Input file path {file_path} not found")
+
+ block_size = block_size - tokenizer.num_special_tokens_to_add(pair=False)
+
+ directory, filename = os.path.split(file_path)
+ cached_features_file = os.path.join(
+ cache_dir if cache_dir is not None else directory,
+ f"cached_lm_{tokenizer.__class__.__name__}_{block_size}_{filename}",
+ )
+
+ # Make sure only the first process in distributed training processes the dataset,
+ # and the others will use the cache.
+ lock_path = cached_features_file + ".lock"
+ with FileLock(lock_path):
+ if os.path.exists(cached_features_file) and not overwrite_cache:
+ start = time.time()
+ with open(cached_features_file, "rb") as handle:
+ self.examples = pickle.load(handle)
+ logger.info(
+ f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
+ )
+
+ else:
+ logger.info(f"Creating features from dataset file at {directory}")
+
+ self.examples = []
+ with open(file_path, encoding="utf-8") as f:
+ text = f.read()
+
+ tokenized_text = tokenizer.convert_tokens_to_ids(tokenizer.tokenize(text))
+
+ for i in range(0, len(tokenized_text) - block_size + 1, block_size): # Truncate in block of block_size
+ self.examples.append(
+ tokenizer.build_inputs_with_special_tokens(tokenized_text[i : i + block_size])
+ )
+ # Note that we are losing the last truncated example here for the sake of simplicity (no padding)
+ # If your dataset is small, first you should look for a bigger one :-) and second you
+ # can change this behavior by adding (model specific) padding.
+
+ start = time.time()
+ with open(cached_features_file, "wb") as handle:
+ pickle.dump(self.examples, handle, protocol=pickle.HIGHEST_PROTOCOL)
+ logger.info(
+ f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
+ )
+
+ def __len__(self):
+ return len(self.examples)
+
+ def __getitem__(self, i) -> torch.Tensor:
+ return torch.tensor(self.examples[i], dtype=torch.long)
+
+
+class LineByLineTextDataset(Dataset):
+ """
+ This will be superseded by a framework-agnostic approach soon.
+ """
+
+ def __init__(self, tokenizer: PreTrainedTokenizer, file_path: str, block_size: int):
+ warnings.warn(
+ DEPRECATION_WARNING.format(
+ "https://github.com/huggingface/transformers/blob/main/examples/pytorch/language-modeling/run_mlm.py"
+ ),
+ FutureWarning,
+ )
+ if os.path.isfile(file_path) is False:
+ raise ValueError(f"Input file path {file_path} not found")
+ # Here, we do not cache the features, operating under the assumption
+ # that we will soon use fast multithreaded tokenizers from the
+ # `tokenizers` repo everywhere =)
+ logger.info(f"Creating features from dataset file at {file_path}")
+
+ with open(file_path, encoding="utf-8") as f:
+ lines = [line for line in f.read().splitlines() if (len(line) > 0 and not line.isspace())]
+
+ batch_encoding = tokenizer(lines, add_special_tokens=True, truncation=True, max_length=block_size)
+ self.examples = batch_encoding["input_ids"]
+ self.examples = [{"input_ids": torch.tensor(e, dtype=torch.long)} for e in self.examples]
+
+ def __len__(self):
+ return len(self.examples)
+
+ def __getitem__(self, i) -> Dict[str, torch.tensor]:
+ return self.examples[i]
+
+
+class LineByLineWithRefDataset(Dataset):
+ """
+ This will be superseded by a framework-agnostic approach soon.
+ """
+
+ def __init__(self, tokenizer: PreTrainedTokenizer, file_path: str, block_size: int, ref_path: str):
+ warnings.warn(
+ DEPRECATION_WARNING.format(
+ "https://github.com/huggingface/transformers/blob/main/examples/pytorch/language-modeling/run_mlm_wwm.py"
+ ),
+ FutureWarning,
+ )
+ if os.path.isfile(file_path) is False:
+ raise ValueError(f"Input file path {file_path} not found")
+ if os.path.isfile(ref_path) is False:
+ raise ValueError(f"Ref file path {file_path} not found")
+ # Here, we do not cache the features, operating under the assumption
+ # that we will soon use fast multithreaded tokenizers from the
+ # `tokenizers` repo everywhere =)
+ logger.info(f"Creating features from dataset file at {file_path}")
+ logger.info(f"Use ref segment results at {ref_path}")
+ with open(file_path, encoding="utf-8") as f:
+            data = f.readlines()  # use readlines() so that the '\u2029' paragraph separator does not split a line
+            data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]
+        # Get ref info from file
+ with open(ref_path, encoding="utf-8") as f:
+ ref = [json.loads(line) for line in f.read().splitlines() if (len(line) > 0 and not line.isspace())]
+ if len(data) != len(ref):
+ raise ValueError(
+ f"Length of Input file should be equal to Ref file. But the length of {file_path} is {len(data)} "
+ f"while length of {ref_path} is {len(ref)}"
+ )
+
+ batch_encoding = tokenizer(data, add_special_tokens=True, truncation=True, max_length=block_size)
+ self.examples = batch_encoding["input_ids"]
+ self.examples = [{"input_ids": torch.tensor(e, dtype=torch.long)} for e in self.examples]
+
+ n = len(self.examples)
+ for i in range(n):
+ self.examples[i]["chinese_ref"] = torch.tensor(ref[i], dtype=torch.long)
+
+ def __len__(self):
+ return len(self.examples)
+
+ def __getitem__(self, i) -> Dict[str, torch.tensor]:
+ return self.examples[i]
+
+
+class LineByLineWithSOPTextDataset(Dataset):
+ """
+    Dataset for the sentence-order-prediction (SOP) task: prepares sentence pairs for SOP training.
+ """
+
+ def __init__(self, tokenizer: PreTrainedTokenizer, file_dir: str, block_size: int):
+ warnings.warn(
+ DEPRECATION_WARNING.format(
+ "https://github.com/huggingface/transformers/blob/main/examples/pytorch/language-modeling/run_mlm.py"
+ ),
+ FutureWarning,
+ )
+ if os.path.isdir(file_dir) is False:
+ raise ValueError(f"{file_dir} is not a directory")
+ logger.info(f"Creating features from dataset file folder at {file_dir}")
+ self.examples = []
+        # TODO: the randomness here could be made reproducible with a seed, e.g. rng = random.Random(random_seed)
+ # file path looks like ./dataset/wiki_1, ./dataset/wiki_2
+ for file_name in os.listdir(file_dir):
+ file_path = os.path.join(file_dir, file_name)
+ if os.path.isfile(file_path) is False:
+ raise ValueError(f"{file_path} is not a file")
+ article_open = False
+ with open(file_path, encoding="utf-8") as f:
+ original_lines = f.readlines()
+ article_lines = []
+ for line in original_lines:
+ if "" in line:
+ article_open = False
+ document = [
+ tokenizer.convert_tokens_to_ids(tokenizer.tokenize(line))
+ for line in article_lines[1:]
+ if (len(line) > 0 and not line.isspace())
+ ]
+
+ examples = self.create_examples_from_document(document, block_size, tokenizer)
+ self.examples.extend(examples)
+ article_lines = []
+ else:
+ if article_open:
+ article_lines.append(line)
+
+ logger.info("Dataset parse finished.")
+
+ def create_examples_from_document(self, document, block_size, tokenizer, short_seq_prob=0.1):
+ """Creates examples for a single document."""
+
+ # Account for special tokens
+ max_num_tokens = block_size - tokenizer.num_special_tokens_to_add(pair=True)
+
+ # We *usually* want to fill up the entire sequence since we are padding
+ # to `block_size` anyways, so short sequences are generally wasted
+ # computation. However, we *sometimes*
+ # (i.e., short_seq_prob == 0.1 == 10% of the time) want to use shorter
+ # sequences to minimize the mismatch between pretraining and fine-tuning.
+ # The `target_seq_length` is just a rough target however, whereas
+ # `block_size` is a hard limit.
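+        # Illustrative numbers (not from the original source): with block_size=512 and a BERT/ALBERT-style
+        # tokenizer, num_special_tokens_to_add(pair=True) is 3 ([CLS] A [SEP] B [SEP]), so max_num_tokens
+        # is 509; roughly 10% of documents then get a shorter random target length between 2 and 509.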
+ target_seq_length = max_num_tokens
+ if random.random() < short_seq_prob:
+ target_seq_length = random.randint(2, max_num_tokens)
+
+ # We DON'T just concatenate all of the tokens from a document into a long
+ # sequence and choose an arbitrary split point because this would make the
+ # next sentence prediction task too easy. Instead, we split the input into
+ # segments "A" and "B" based on the actual "sentences" provided by the user
+ # input.
+ examples = []
+        current_chunk = []  # a buffer storing the current working segments
+ current_length = 0
+ i = 0
+ while i < len(document):
+ segment = document[i] # get a segment
+ if not segment:
+ i += 1
+ continue
+ current_chunk.append(segment) # add a segment to current chunk
+ current_length += len(segment) # overall token length
+            # if the current length reaches the target length, or we are at the end of the document, start building segments A and B
+ if i == len(document) - 1 or current_length >= target_seq_length:
+ if current_chunk:
+ # `a_end` is how many segments from `current_chunk` go into the `A` (first) sentence.
+ a_end = 1
+                    # if the current chunk has two or more segments, randomly pick how many go into the `A` (first) sentence
+ if len(current_chunk) >= 2:
+ a_end = random.randint(1, len(current_chunk) - 1)
+ # token a
+ tokens_a = []
+ for j in range(a_end):
+ tokens_a.extend(current_chunk[j])
+
+ # token b
+ tokens_b = []
+ for j in range(a_end, len(current_chunk)):
+ tokens_b.extend(current_chunk[j])
+
+ if len(tokens_a) == 0 or len(tokens_b) == 0:
+ continue
+
+ # switch tokens_a and tokens_b randomly
+ if random.random() < 0.5:
+ is_next = False
+ tokens_a, tokens_b = tokens_b, tokens_a
+ else:
+ is_next = True
+
+ def truncate_seq_pair(tokens_a, tokens_b, max_num_tokens):
+ """Truncates a pair of sequences to a maximum sequence length."""
+ while True:
+ total_length = len(tokens_a) + len(tokens_b)
+ if total_length <= max_num_tokens:
+ break
+ trunc_tokens = tokens_a if len(tokens_a) > len(tokens_b) else tokens_b
+ if not (len(trunc_tokens) >= 1):
+ raise ValueError("Sequence length to be truncated must be no less than one")
+ # We want to sometimes truncate from the front and sometimes from the
+ # back to add more randomness and avoid biases.
+ if random.random() < 0.5:
+ del trunc_tokens[0]
+ else:
+ trunc_tokens.pop()
+
+ truncate_seq_pair(tokens_a, tokens_b, max_num_tokens)
+ if not (len(tokens_a) >= 1):
+ raise ValueError(f"Length of sequence a is {len(tokens_a)} which must be no less than 1")
+ if not (len(tokens_b) >= 1):
+ raise ValueError(f"Length of sequence b is {len(tokens_b)} which must be no less than 1")
+
+ # add special tokens
+ input_ids = tokenizer.build_inputs_with_special_tokens(tokens_a, tokens_b)
+ # add token type ids, 0 for sentence a, 1 for sentence b
+ token_type_ids = tokenizer.create_token_type_ids_from_sequences(tokens_a, tokens_b)
+
+ example = {
+ "input_ids": torch.tensor(input_ids, dtype=torch.long),
+ "token_type_ids": torch.tensor(token_type_ids, dtype=torch.long),
+ "sentence_order_label": torch.tensor(0 if is_next else 1, dtype=torch.long),
+ }
+ examples.append(example)
+ current_chunk = [] # clear current chunk
+ current_length = 0 # reset current text length
+ i += 1 # go to next line
+ return examples
+
+ def __len__(self):
+ return len(self.examples)
+
+ def __getitem__(self, i) -> Dict[str, torch.tensor]:
+ return self.examples[i]
+
+
+class TextDatasetForNextSentencePrediction(Dataset):
+ """
+ This will be superseded by a framework-agnostic approach soon.
+ """
+
+ def __init__(
+ self,
+ tokenizer: PreTrainedTokenizer,
+ file_path: str,
+ block_size: int,
+ overwrite_cache=False,
+ short_seq_probability=0.1,
+ nsp_probability=0.5,
+ ):
+ warnings.warn(
+ DEPRECATION_WARNING.format(
+ "https://github.com/huggingface/transformers/blob/main/examples/pytorch/language-modeling/run_mlm.py"
+ ),
+ FutureWarning,
+ )
+ if not os.path.isfile(file_path):
+ raise ValueError(f"Input file path {file_path} not found")
+
+ self.short_seq_probability = short_seq_probability
+ self.nsp_probability = nsp_probability
+
+ directory, filename = os.path.split(file_path)
+ cached_features_file = os.path.join(
+ directory,
+ f"cached_nsp_{tokenizer.__class__.__name__}_{block_size}_{filename}",
+ )
+
+ self.tokenizer = tokenizer
+
+ # Make sure only the first process in distributed training processes the dataset,
+ # and the others will use the cache.
+ lock_path = cached_features_file + ".lock"
+
+ # Input file format:
+ # (1) One sentence per line. These should ideally be actual sentences, not
+ # entire paragraphs or arbitrary spans of text. (Because we use the
+ # sentence boundaries for the "next sentence prediction" task).
+ # (2) Blank lines between documents. Document boundaries are needed so
+ # that the "next sentence prediction" task doesn't span between documents.
+ #
+ # Example:
+ # I am very happy.
+ # Here is the second sentence.
+ #
+ # A new document.
+
+ with FileLock(lock_path):
+ if os.path.exists(cached_features_file) and not overwrite_cache:
+ start = time.time()
+ with open(cached_features_file, "rb") as handle:
+ self.examples = pickle.load(handle)
+ logger.info(
+ f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
+ )
+ else:
+ logger.info(f"Creating features from dataset file at {directory}")
+
+ self.documents = [[]]
+ with open(file_path, encoding="utf-8") as f:
+ while True:
+ line = f.readline()
+ if not line:
+ break
+ line = line.strip()
+
+ # Empty lines are used as document delimiters
+ if not line and len(self.documents[-1]) != 0:
+ self.documents.append([])
+ tokens = tokenizer.tokenize(line)
+ tokens = tokenizer.convert_tokens_to_ids(tokens)
+ if tokens:
+ self.documents[-1].append(tokens)
+
+ logger.info(f"Creating examples from {len(self.documents)} documents.")
+ self.examples = []
+ for doc_index, document in enumerate(self.documents):
+ self.create_examples_from_document(document, doc_index, block_size)
+
+ start = time.time()
+ with open(cached_features_file, "wb") as handle:
+ pickle.dump(self.examples, handle, protocol=pickle.HIGHEST_PROTOCOL)
+ logger.info(
+ f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
+ )
+
+ def create_examples_from_document(self, document: List[List[int]], doc_index: int, block_size: int):
+ """Creates examples for a single document."""
+
+ max_num_tokens = block_size - self.tokenizer.num_special_tokens_to_add(pair=True)
+
+ # We *usually* want to fill up the entire sequence since we are padding
+ # to `block_size` anyways, so short sequences are generally wasted
+ # computation. However, we *sometimes*
+ # (i.e., short_seq_prob == 0.1 == 10% of the time) want to use shorter
+ # sequences to minimize the mismatch between pretraining and fine-tuning.
+ # The `target_seq_length` is just a rough target however, whereas
+ # `block_size` is a hard limit.
+ target_seq_length = max_num_tokens
+ if random.random() < self.short_seq_probability:
+ target_seq_length = random.randint(2, max_num_tokens)
+
+        current_chunk = []  # a buffer storing the current working segments
+ current_length = 0
+ i = 0
+
+ while i < len(document):
+ segment = document[i]
+ current_chunk.append(segment)
+ current_length += len(segment)
+ if i == len(document) - 1 or current_length >= target_seq_length:
+ if current_chunk:
+ # `a_end` is how many segments from `current_chunk` go into the `A`
+ # (first) sentence.
+ a_end = 1
+ if len(current_chunk) >= 2:
+ a_end = random.randint(1, len(current_chunk) - 1)
+
+ tokens_a = []
+ for j in range(a_end):
+ tokens_a.extend(current_chunk[j])
+
+ tokens_b = []
+
+ if len(current_chunk) == 1 or random.random() < self.nsp_probability:
+ is_random_next = True
+ target_b_length = target_seq_length - len(tokens_a)
+
+ # This should rarely go for more than one iteration for large
+ # corpora. However, just to be careful, we try to make sure that
+ # the random document is not the same as the document
+ # we're processing.
+ for _ in range(10):
+ random_document_index = random.randint(0, len(self.documents) - 1)
+ if random_document_index != doc_index:
+ break
+
+ random_document = self.documents[random_document_index]
+ random_start = random.randint(0, len(random_document) - 1)
+ for j in range(random_start, len(random_document)):
+ tokens_b.extend(random_document[j])
+ if len(tokens_b) >= target_b_length:
+ break
+ # We didn't actually use these segments so we "put them back" so
+ # they don't go to waste.
+ num_unused_segments = len(current_chunk) - a_end
+ i -= num_unused_segments
+ # Actual next
+ else:
+ is_random_next = False
+ for j in range(a_end, len(current_chunk)):
+ tokens_b.extend(current_chunk[j])
+
+ if not (len(tokens_a) >= 1):
+ raise ValueError(f"Length of sequence a is {len(tokens_a)} which must be no less than 1")
+ if not (len(tokens_b) >= 1):
+ raise ValueError(f"Length of sequence b is {len(tokens_b)} which must be no less than 1")
+
+ # add special tokens
+ input_ids = self.tokenizer.build_inputs_with_special_tokens(tokens_a, tokens_b)
+ # add token type ids, 0 for sentence a, 1 for sentence b
+ token_type_ids = self.tokenizer.create_token_type_ids_from_sequences(tokens_a, tokens_b)
+
+ example = {
+ "input_ids": torch.tensor(input_ids, dtype=torch.long),
+ "token_type_ids": torch.tensor(token_type_ids, dtype=torch.long),
+ "next_sentence_label": torch.tensor(1 if is_random_next else 0, dtype=torch.long),
+ }
+
+ self.examples.append(example)
+
+ current_chunk = []
+ current_length = 0
+
+ i += 1
+
+ def __len__(self):
+ return len(self.examples)
+
+ def __getitem__(self, i):
+ return self.examples[i]
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/data/datasets/squad.py b/llmeval-env/lib/python3.10/site-packages/transformers/data/datasets/squad.py
new file mode 100644
index 0000000000000000000000000000000000000000..d81217d818afff5e297e6992d979847cf7c0f4cc
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/data/datasets/squad.py
@@ -0,0 +1,229 @@
+# Copyright 2020 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import time
+from dataclasses import dataclass, field
+from enum import Enum
+from typing import Dict, List, Optional, Union
+
+import torch
+from filelock import FileLock
+from torch.utils.data import Dataset
+
+from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
+from ...tokenization_utils import PreTrainedTokenizer
+from ...utils import logging
+from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
+
+
+logger = logging.get_logger(__name__)
+
+MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
+MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
+
+
+@dataclass
+class SquadDataTrainingArguments:
+ """
+ Arguments pertaining to what data we are going to input our model for training and eval.
+ """
+
+ model_type: str = field(
+ default=None, metadata={"help": "Model type selected in the list: " + ", ".join(MODEL_TYPES)}
+ )
+ data_dir: str = field(
+ default=None, metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."}
+ )
+ max_seq_length: int = field(
+ default=128,
+ metadata={
+ "help": (
+ "The maximum total input sequence length after tokenization. Sequences longer "
+ "than this will be truncated, sequences shorter will be padded."
+ )
+ },
+ )
+ doc_stride: int = field(
+ default=128,
+ metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."},
+ )
+ max_query_length: int = field(
+ default=64,
+ metadata={
+ "help": (
+ "The maximum number of tokens for the question. Questions longer than this will "
+ "be truncated to this length."
+ )
+ },
+ )
+ max_answer_length: int = field(
+ default=30,
+ metadata={
+ "help": (
+ "The maximum length of an answer that can be generated. This is needed because the start "
+ "and end predictions are not conditioned on one another."
+ )
+ },
+ )
+ overwrite_cache: bool = field(
+ default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
+ )
+ version_2_with_negative: bool = field(
+ default=False, metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."}
+ )
+ null_score_diff_threshold: float = field(
+ default=0.0, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
+ )
+ n_best_size: int = field(
+        default=20, metadata={"help": "The total number of n-best predictions to generate."}
+ )
+ lang_id: int = field(
+ default=0,
+ metadata={
+ "help": (
+ "language id of input for language-specific xlm models (see"
+ " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
+ )
+ },
+ )
+    threads: int = field(default=1, metadata={"help": "multiple threads for converting examples to features"})
+
+
+class Split(Enum):
+ train = "train"
+ dev = "dev"
+
+
+class SquadDataset(Dataset):
+ """
+ This will be superseded by a framework-agnostic approach soon.
+ """
+
+ args: SquadDataTrainingArguments
+ features: List[SquadFeatures]
+ mode: Split
+ is_language_sensitive: bool
+
+ def __init__(
+ self,
+ args: SquadDataTrainingArguments,
+ tokenizer: PreTrainedTokenizer,
+ limit_length: Optional[int] = None,
+ mode: Union[str, Split] = Split.train,
+ is_language_sensitive: Optional[bool] = False,
+ cache_dir: Optional[str] = None,
+ dataset_format: Optional[str] = "pt",
+ ):
+ self.args = args
+ self.is_language_sensitive = is_language_sensitive
+ self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
+ if isinstance(mode, str):
+ try:
+ mode = Split[mode]
+ except KeyError:
+ raise KeyError("mode is not a valid split name")
+ self.mode = mode
+ # Load data features from cache or dataset file
+ version_tag = "v2" if args.version_2_with_negative else "v1"
+ cached_features_file = os.path.join(
+ cache_dir if cache_dir is not None else args.data_dir,
+ f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}",
+ )
+
+ # Make sure only the first process in distributed training processes the dataset,
+ # and the others will use the cache.
+ lock_path = cached_features_file + ".lock"
+ with FileLock(lock_path):
+ if os.path.exists(cached_features_file) and not args.overwrite_cache:
+ start = time.time()
+ self.old_features = torch.load(cached_features_file)
+
+ # Legacy cache files have only features, while new cache files
+ # will have dataset and examples also.
+ self.features = self.old_features["features"]
+ self.dataset = self.old_features.get("dataset", None)
+ self.examples = self.old_features.get("examples", None)
+ logger.info(
+ f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
+ )
+
+ if self.dataset is None or self.examples is None:
+ logger.warning(
+ f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"
+ " future run"
+ )
+ else:
+ if mode == Split.dev:
+ self.examples = self.processor.get_dev_examples(args.data_dir)
+ else:
+ self.examples = self.processor.get_train_examples(args.data_dir)
+
+ self.features, self.dataset = squad_convert_examples_to_features(
+ examples=self.examples,
+ tokenizer=tokenizer,
+ max_seq_length=args.max_seq_length,
+ doc_stride=args.doc_stride,
+ max_query_length=args.max_query_length,
+ is_training=mode == Split.train,
+ threads=args.threads,
+ return_dataset=dataset_format,
+ )
+
+ start = time.time()
+ torch.save(
+ {"features": self.features, "dataset": self.dataset, "examples": self.examples},
+ cached_features_file,
+ )
+ # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
+ logger.info(
+ f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
+ )
+
+ def __len__(self):
+ return len(self.features)
+
+ def __getitem__(self, i) -> Dict[str, torch.Tensor]:
+ # Convert to Tensors and build dataset
+ feature = self.features[i]
+
+ input_ids = torch.tensor(feature.input_ids, dtype=torch.long)
+ attention_mask = torch.tensor(feature.attention_mask, dtype=torch.long)
+ token_type_ids = torch.tensor(feature.token_type_ids, dtype=torch.long)
+ cls_index = torch.tensor(feature.cls_index, dtype=torch.long)
+ p_mask = torch.tensor(feature.p_mask, dtype=torch.float)
+ is_impossible = torch.tensor(feature.is_impossible, dtype=torch.float)
+
+ inputs = {
+ "input_ids": input_ids,
+ "attention_mask": attention_mask,
+ "token_type_ids": token_type_ids,
+ }
+
+ if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
+ del inputs["token_type_ids"]
+
+ if self.args.model_type in ["xlnet", "xlm"]:
+ inputs.update({"cls_index": cls_index, "p_mask": p_mask})
+ if self.args.version_2_with_negative:
+ inputs.update({"is_impossible": is_impossible})
+ if self.is_language_sensitive:
+ inputs.update({"langs": (torch.ones(input_ids.shape, dtype=torch.int64) * self.args.lang_id)})
+
+ if self.mode == Split.train:
+ start_positions = torch.tensor(feature.start_position, dtype=torch.long)
+ end_positions = torch.tensor(feature.end_position, dtype=torch.long)
+ inputs.update({"start_positions": start_positions, "end_positions": end_positions})
+
+ return inputs
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/kernels/deformable_detr/ms_deform_attn.h b/llmeval-env/lib/python3.10/site-packages/transformers/kernels/deformable_detr/ms_deform_attn.h
new file mode 100644
index 0000000000000000000000000000000000000000..119b1fa317d1e5fcfb61a4837e560e9248db05f3
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/kernels/deformable_detr/ms_deform_attn.h
@@ -0,0 +1,61 @@
+/*!
+**************************************************************************************************
+* Deformable DETR
+* Copyright (c) 2020 SenseTime. All Rights Reserved.
+* Licensed under the Apache License, Version 2.0 [see LICENSE for details]
+**************************************************************************************************
+* Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
+**************************************************************************************************
+*/
+
+#pragma once
+
+#include "cpu/ms_deform_attn_cpu.h"
+
+#ifdef WITH_CUDA
+#include "cuda/ms_deform_attn_cuda.h"
+#endif
+
+
+at::Tensor
+ms_deform_attn_forward(
+ const at::Tensor &value,
+ const at::Tensor &spatial_shapes,
+ const at::Tensor &level_start_index,
+ const at::Tensor &sampling_loc,
+ const at::Tensor &attn_weight,
+ const int im2col_step)
+{
+ if (value.type().is_cuda())
+ {
+#ifdef WITH_CUDA
+ return ms_deform_attn_cuda_forward(
+ value, spatial_shapes, level_start_index, sampling_loc, attn_weight, im2col_step);
+#else
+ AT_ERROR("Not compiled with GPU support");
+#endif
+ }
+ AT_ERROR("Not implemented on the CPU");
+}
+
+std::vector<at::Tensor>
+ms_deform_attn_backward(
+ const at::Tensor &value,
+ const at::Tensor &spatial_shapes,
+ const at::Tensor &level_start_index,
+ const at::Tensor &sampling_loc,
+ const at::Tensor &attn_weight,
+ const at::Tensor &grad_output,
+ const int im2col_step)
+{
+ if (value.type().is_cuda())
+ {
+#ifdef WITH_CUDA
+ return ms_deform_attn_cuda_backward(
+ value, spatial_shapes, level_start_index, sampling_loc, attn_weight, grad_output, im2col_step);
+#else
+ AT_ERROR("Not compiled with GPU support");
+#endif
+ }
+ AT_ERROR("Not implemented on the CPU");
+}
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/kernels/deformable_detr/vision.cpp b/llmeval-env/lib/python3.10/site-packages/transformers/kernels/deformable_detr/vision.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..6ce3875568b9ba8d660c90acc805077cca98f891
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/kernels/deformable_detr/vision.cpp
@@ -0,0 +1,16 @@
+/*!
+**************************************************************************************************
+* Deformable DETR
+* Copyright (c) 2020 SenseTime. All Rights Reserved.
+* Licensed under the Apache License, Version 2.0 [see LICENSE for details]
+**************************************************************************************************
+* Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
+**************************************************************************************************
+*/
+
+#include "ms_deform_attn.h"
+
+PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
+ m.def("ms_deform_attn_forward", &ms_deform_attn_forward, "ms_deform_attn_forward");
+ m.def("ms_deform_attn_backward", &ms_deform_attn_backward, "ms_deform_attn_backward");
+}
\ No newline at end of file
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/kernels/rwkv/wkv_cuda_bf16.cu b/llmeval-env/lib/python3.10/site-packages/transformers/kernels/rwkv/wkv_cuda_bf16.cu
new file mode 100644
index 0000000000000000000000000000000000000000..042cb4aba1db98be5916aea1de86a7fed0b6510d
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/kernels/rwkv/wkv_cuda_bf16.cu
@@ -0,0 +1,186 @@
+#include <stdio.h>
+#include <assert.h>
+#include "ATen/ATen.h"
+#define MIN_VALUE (-1e38)
+typedef at::BFloat16 bf16;
+
+__global__ void kernel_forward_bf16(
+ const int B, const int T, const int C, const float *__restrict__ const _w, const bf16 *__restrict__ const _u,
+ const bf16 *__restrict__ const _k, const bf16 *__restrict__ const _v, bf16 *__restrict__ const _y
+) {
+ const int idx = blockIdx.x * blockDim.x + threadIdx.x;
+ const int _b = idx / C;
+ const int _c = idx % C;
+ const int _offset = _b * T * C + _c;
+
+ float u = float(_u[_c]);
+ float w = _w[_c];
+ const bf16 *__restrict__ const k = _k + _offset;
+ const bf16 *__restrict__ const v = _v + _offset;
+ bf16 *__restrict__ const y = _y + _offset;
+
+ // aa and bb are running sums divided by exp(pp) (to avoid overflow)
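+    // Explanatory note (added): for each channel this loop evaluates
+    //   y_t = (sum_{i<t} e^{(t-1-i)*w + k_i} * v_i + e^{u + k_t} * v_t)
+    //       / (sum_{i<t} e^{(t-1-i)*w + k_i}       + e^{u + k_t})
+    // where w is the (already negated) per-channel decay. aa and bb hold the numerator and denominator
+    // scaled by exp(-pp), with pp tracking the largest exponent seen so far (a log-sum-exp trick).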
+ float aa = 0, bb = 0, pp = MIN_VALUE;
+ for (int i = 0; i < T; i++) {
+ const int ii = i * C;
+ const float kk = float(k[ii]);
+ const float vv = float(v[ii]);
+
+ float ww = u + kk;
+ float p = max(pp, ww);
+ float e1 = exp(pp - p);
+ float e2 = exp(ww - p);
+ y[ii] = bf16((e1 * aa + e2 * vv) / (e1 * bb + e2));
+
+ ww = w + pp;
+ p = max(ww, kk);
+ e1 = exp(ww - p);
+ e2 = exp(kk - p);
+ aa = e1 * aa + e2 * vv;
+ bb = e1 * bb + e2;
+ pp = p;
+ }
+}
+
+__global__ void kernel_forward_with_state_bf16(
+ const int B, const int T, const int C, const float *__restrict__ const _w, const bf16 *__restrict__ const _u,
+ const bf16 *__restrict__ const _k, const bf16 *__restrict__ const _v, bf16 *__restrict__ const _y,
+ float *__restrict__ const _s
+) {
+ const int idx = blockIdx.x * blockDim.x + threadIdx.x;
+ const int _b = idx / C;
+ const int _c = idx % C;
+ const int _offset_s = _b * C * 3 + _c * 3;
+ const int _offset = _b * T * C + _c;
+
+ float u = float(_u[_c]);
+ float w = _w[_c];
+ const bf16 *__restrict__ const k = _k + _offset;
+ const bf16 *__restrict__ const v = _v + _offset;
+ bf16 *__restrict__ const y = _y + _offset;
+ float *__restrict__ const s = _s + _offset_s;
+
+ // aa and bb are running sums divided by exp(pp) (to avoid overflow)
+ float aa = s[0], bb = s[1], pp = s[2];
+ for (int i = 0; i < T; i++) {
+ const int ii = i * C;
+ const float kk = float(k[ii]);
+ const float vv = float(v[ii]);
+
+ float ww = u + kk;
+ float p = max(pp, ww);
+ float e1 = exp(pp - p);
+ float e2 = exp(ww - p);
+ y[ii] = bf16(e1 * aa + e2 * vv) / (e1 * bb + e2);
+
+ ww = w + pp;
+ p = max(ww, kk);
+ e1 = exp(ww - p);
+ e2 = exp(kk - p);
+ aa = e1 * aa + e2 * vv;
+ bb = e1 * bb + e2;
+ pp = p;
+ }
+ s[0] = aa;
+ s[1] = bb;
+ s[2] = pp;
+}
+
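+// Backward WKV kernel, two sequential passes per (batch, channel) slot:
+//  * a forward pass that accumulates the gradients for w and u while caching the
+//    per-step quantities q[i] and r[i] (T must not exceed the compile-time constant
+//    Tmax used to size these local arrays);
+//  * a reverse pass that uses the cached values to produce the gradients for k and v.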
+__global__ void kernel_backward_bf16(
+ const int B, const int T, const int C, const float *__restrict__ const _w, const bf16 *__restrict__ const _u,
+ const bf16 *__restrict__ const _k, const bf16 *__restrict__ const _v, const bf16 *__restrict__ const _y,
+ const bf16 *__restrict__ const _gy, bf16 *__restrict__ const _gw, bf16 *__restrict__ const _gu,
+ bf16 *__restrict__ const _gk, bf16 *__restrict__ const _gv
+) {
+ const int idx = blockIdx.x * blockDim.x + threadIdx.x;
+ const int _b = idx / C;
+ const int _c = idx % C;
+ const int _offset = _b * T * C + _c;
+
+ float u = float(_u[_c]);
+ float w = _w[_c];
+ const bf16 *__restrict__ const k = _k + _offset;
+ const bf16 *__restrict__ const v = _v + _offset;
+ const bf16 *__restrict__ const y = _y + _offset;
+ const bf16 *__restrict__ const gy = _gy + _offset;
+ bf16 *__restrict__ const gk = _gk + _offset;
+ bf16 *__restrict__ const gv = _gv + _offset;
+
+ float q[Tmax], r[Tmax];
+
+ float gw = 0, gu = 0, aa = 0, bb = 0, ga = 0, gb = 0, pp = MIN_VALUE;
+ for (int i = 0; i < T; i++) {
+ const int ii = i * C;
+ const float kk = float(k[ii]);
+ const float vv = float(v[ii]);
+ const float yy = float(y[ii]);
+
+ float ww = u + kk;
+ float p = max(pp, ww);
+ float e1 = exp(pp - p);
+ float e2 = exp(ww - p);
+ const float qq = float(gy[ii]) / (e1 * bb + e2);
+ gw += (ga - gb * yy) * e1 * qq;
+ gu += (vv - yy) * e2 * qq;
+ q[i] = qq;
+ r[i] = ww - p;
+
+ ww = w + pp;
+ p = max(ww, kk);
+ e1 = exp(ww - p);
+ e2 = exp(kk - p);
+ ga = e1 * (aa + ga);
+ gb = e1 * (bb + gb);
+ aa = e1 * aa + e2 * vv;
+ bb = e1 * bb + e2;
+ pp = p;
+ }
+ const int _offsetBC = _b * C + _c;
+ _gw[_offsetBC] = bf16(gw * _w[_c]); // multiply by w because of w -> -exp(w) in python forward()
+ _gu[_offsetBC] = bf16(gu);
+
+ aa = 0, bb = 0, pp = MIN_VALUE;
+ for (int i = T - 1; i >= 0; i--) {
+ const int ii = i * C;
+ const float kk = float(k[ii]);
+ const float vv = float(v[ii]);
+ const float yy = float(y[ii]);
+ const float qq = q[i];
+ const float rr = r[i];
+
+ float e1 = qq * exp(rr);
+ float e2 = exp(kk + pp);
+ gk[ii] = bf16(e1 * (vv - yy) + e2 * (aa * vv + bb));
+ gv[ii] = bf16(e1 + e2 * aa);
+
+ const float ww = w + pp;
+ const float www = rr - u - kk;
+ const float p = max(ww, www);
+ e1 = exp(ww - p);
+ e2 = qq * exp(www - p);
+ aa = e1 * aa + e2;
+ bb = e1 * bb - e2 * yy;
+ pp = p;
+ }
+}
+
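+// Host-side launchers: one thread per (batch, channel) element, at most 32 threads per
+// block. The assert below reflects the requirement that B * C be divisible by the block
+// size so the grid covers the input exactly.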
+void cuda_forward_bf16(int B, int T, int C, float *w, bf16 *u, bf16 *k, bf16 *v, bf16 *y) {
+ dim3 threadsPerBlock( min(C, 32) ); // requires --maxrregcount 60 for optimal performance
+ assert(B * C % threadsPerBlock.x == 0);
+ dim3 numBlocks(B * C / threadsPerBlock.x);
+ kernel_forward_bf16<<<numBlocks, threadsPerBlock>>>(B, T, C, w, u, k, v, y);
+}
+
+void cuda_forward_with_state_bf16(int B, int T, int C, float *w, bf16 *u, bf16 *k, bf16 *v, bf16 *y, float *s) {
+ dim3 threadsPerBlock( min(C, 32) ); // requires --maxrregcount 60 for optimal performance
+ assert(B * C % threadsPerBlock.x == 0);
+ dim3 numBlocks(B * C / threadsPerBlock.x);
+ kernel_forward_with_state_bf16<<<numBlocks, threadsPerBlock>>>(B, T, C, w, u, k, v, y, s);
+}
+
+void cuda_backward_bf16(int B, int T, int C, float *w, bf16 *u, bf16 *k, bf16 *v, bf16 *y, bf16 *gy, bf16 *gw, bf16 *gu, bf16 *gk, bf16 *gv) {
+ dim3 threadsPerBlock( min(C, 32) ); // requires --maxrregcount 60 for optimal performance
+ assert(B * C % threadsPerBlock.x == 0);
+ dim3 numBlocks(B * C / threadsPerBlock.x);
+ kernel_backward_bf16<<<numBlocks, threadsPerBlock>>>(B, T, C, w, u, k, v, y, gy, gw, gu, gk, gv);
+}
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/kernels/rwkv/wkv_op.cpp b/llmeval-env/lib/python3.10/site-packages/transformers/kernels/rwkv/wkv_op.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..55e7280665927b523a88021d5111daf28a63c905
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/kernels/rwkv/wkv_op.cpp
@@ -0,0 +1,66 @@
+#include <torch/extension.h>
+#include "ATen/ATen.h"
+typedef at::BFloat16 bf16;
+
+void cuda_forward(int B, int T, int C, float *w, float *u, float *k, float *v, float *y);
+void cuda_forward_bf16(int B, int T, int C, float *w, bf16 *u, bf16 *k, bf16 *v, bf16 *y);
+void cuda_forward_with_state(int B, int T, int C, float *w, float *u, float *k, float *v, float *y, float *s);
+void cuda_forward_with_state_bf16(int B, int T, int C, float *w, bf16 *u, bf16 *k, bf16 *v, bf16 *y, float *s);
+void cuda_backward(int B, int T, int C, float *w, float *u, float *k, float *v, float *y, float *gy, float *gw, float *gu, float *gk, float *gv);
+void cuda_backward_bf16(int B, int T, int C, float *w, bf16 *u, bf16 *k, bf16 *v, bf16 *y, bf16 *gy, bf16 *gw, bf16 *gu, bf16 *gk, bf16 *gv);
+
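+// The wrappers below read B, T, C from the shape of k and hand raw, dtype-specific data
+// pointers to the CUDA launchers declared above; the tensors are expected to be
+// contiguous CUDA tensors of the matching dtype (float32, or bfloat16 for the *_bf16
+// variants, with w and the state buffer kept in float32).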
+void forward(torch::Tensor &w, torch::Tensor &u, torch::Tensor &k, torch::Tensor &v, torch::Tensor &y) {
+ const int B = k.size(0);
+ const int T = k.size(1);
+ const int C = k.size(2);
+ cuda_forward(B, T, C, w.data_ptr<float>(), u.data_ptr<float>(), k.data_ptr<float>(), v.data_ptr<float>(), y.data_ptr<float>());
+}
+void forward_bf16(torch::Tensor &w, torch::Tensor &u, torch::Tensor &k, torch::Tensor &v, torch::Tensor &y) {
+ const int B = k.size(0);
+ const int T = k.size(1);
+ const int C = k.size(2);
+ cuda_forward_bf16(B, T, C, w.data_ptr<float>(), u.data_ptr<bf16>(), k.data_ptr<bf16>(), v.data_ptr<bf16>(), y.data_ptr<bf16>());
+}
+void forward_with_state(torch::Tensor &w, torch::Tensor &u, torch::Tensor &k, torch::Tensor &v, torch::Tensor &y, torch::Tensor &s) {
+ const int B = k.size(0);
+ const int T = k.size(1);
+ const int C = k.size(2);
+ cuda_forward_with_state(B, T, C, w.data_ptr<float>(), u.data_ptr<float>(), k.data_ptr<float>(), v.data_ptr<float>(), y.data_ptr<float>(), s.data_ptr<float>());
+}
+void forward_with_state_bf16(torch::Tensor &w, torch::Tensor &u, torch::Tensor &k, torch::Tensor &v, torch::Tensor &y, torch::Tensor &s) {
+ const int B = k.size(0);
+ const int T = k.size(1);
+ const int C = k.size(2);
+ cuda_forward_with_state_bf16(B, T, C, w.data_ptr<float>(), u.data_ptr<bf16>(), k.data_ptr<bf16>(), v.data_ptr<bf16>(), y.data_ptr<bf16>(), s.data_ptr<float>());
+}
+void backward(torch::Tensor &w, torch::Tensor &u, torch::Tensor &k, torch::Tensor &v, torch::Tensor &y, torch::Tensor &gy, torch::Tensor &gw, torch::Tensor &gu, torch::Tensor &gk, torch::Tensor &gv) {
+ const int B = k.size(0);
+ const int T = k.size(1);
+ const int C = k.size(2);
+ cuda_backward(B, T, C, w.data_ptr<float>(), u.data_ptr<float>(), k.data_ptr<float>(), v.data_ptr<float>(), y.data_ptr<float>(), gy.data_ptr<float>(), gw.data_ptr<float>(), gu.data_ptr<float>(), gk.data_ptr<float>(), gv.data_ptr<float>());
+}
+void backward_bf16(torch::Tensor &w, torch::Tensor &u, torch::Tensor &k, torch::Tensor &v, torch::Tensor &y, torch::Tensor &gy, torch::Tensor &gw, torch::Tensor &gu, torch::Tensor &gk, torch::Tensor &gv) {
+ const int B = k.size(0);
+ const int T = k.size(1);
+ const int C = k.size(2);
+ cuda_backward_bf16(B, T, C, w.data_ptr<float>(), u.data_ptr<bf16>(), k.data_ptr<bf16>(), v.data_ptr<bf16>(), y.data_ptr<bf16>(),
+ gy.data_ptr<bf16>(), gw.data_ptr<bf16>(), gu.data_ptr<bf16>(), gk.data_ptr<bf16>(), gv.data_ptr<bf16>());
+}
+
+PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
+ m.def("forward", &forward, "wkv forward");
+ m.def("forward_bf16", &forward_bf16, "wkv forward bf16");
+ m.def("forward_with_state", &forward_with_state, "wkv forward with state");
+ m.def("forward_with_state_bf16", &forward_with_state_bf16, "wkv forward with state bf16");
+ m.def("backward", &backward, "wkv backward");
+ m.def("backward_bf16", &backward_bf16, "wkv backward bf16");
+}
+
+TORCH_LIBRARY(wkv, m) {
+ m.def("forward", forward);
+ m.def("forward_bf16", forward_bf16);
+ m.def("forward_with_state", forward_with_state);
+ m.def("forward_with_state_bf16", forward_with_state_bf16);
+ m.def("backward", backward);
+ m.def("backward_bf16", backward_bf16);
+}
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/kernels/yoso/common.h b/llmeval-env/lib/python3.10/site-packages/transformers/kernels/yoso/common.h
new file mode 100644
index 0000000000000000000000000000000000000000..e5085c88dd3ea9a12eec264a8c48946bf2b80b23
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/kernels/yoso/common.h
@@ -0,0 +1,10 @@
+
+#define min(a, b) ((a)<(b)?(a):(b))
+#define max(a, b) ((a)>(b)?(a):(b))
+#define ceil_divide(a, b) ((a)/(b)+((a)%(b)!=0))
+#define select(cond, a, b) ((cond)?(a):(b))
+#define PI 3.141592
+#define EPSILON 1e-8
+#define MAX_VAL 1e12
+#define MIN_VAL -1e12
+#define EMPTY_VALUE -1
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/kernels/yoso/common_cuda.h b/llmeval-env/lib/python3.10/site-packages/transformers/kernels/yoso/common_cuda.h
new file mode 100644
index 0000000000000000000000000000000000000000..97030870649a2fdac58cb26cf966e8f5c8cc7909
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/kernels/yoso/common_cuda.h
@@ -0,0 +1,9 @@
+
+#define MAX_THREADS_PER_BLOCK 1024
+#define OPTIMAL_THREADS_PER_BLOCK 256
+#define WARP_SIZE 32
+#define MAX_NUM_BLOCK_X 2147483647
+#define MAX_NUM_BLOCK_Y 65535
+#define MAX_NUM_BLOCK_Z 65535
+#define MAX_SHARED_MEM_PER_BLOCK 48000
+#define FULL_MASK 0xffffffff
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/kernels/yoso/common_cuda_device.h b/llmeval-env/lib/python3.10/site-packages/transformers/kernels/yoso/common_cuda_device.h
new file mode 100644
index 0000000000000000000000000000000000000000..6674f93afdc25ab35c5d83881d00028bcf2989fc
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/kernels/yoso/common_cuda_device.h
@@ -0,0 +1,79 @@
+
+#include "common.h"
+
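+// Small device-side helpers shared by the YOSO kernels: an open-addressing hash set with
+// linear probing (set_insert / set_lookup, EMPTY_VALUE marks a free slot) and cooperative
+// buffer init/copy routines in which each of num_threads threads handles a strided slice
+// (the *_nonblocking variants omit the surrounding __syncthreads()).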
+template<typename T>
+__device__ int set_insert(T *set, int set_size, T value) {
+ int slot = value % set_size;
+ int start_slot = slot;
+ while (true) {
+ T prev = atomicCAS(&set[slot], EMPTY_VALUE, value);
+ if (prev == EMPTY_VALUE || prev == value) {
+ return slot;
+ }
+ slot = (slot + 1) % set_size;
+ if (slot == start_slot) {
+ return -1;
+ }
+ }
+ return -1;
+}
+
+template<typename T>
+__device__ int set_lookup(T *set, int set_size, T value) {
+ int slot = value % set_size;
+ int start_slot = slot;
+ while (true) {
+ if (set[slot] == value) {
+ return slot;
+ }
+ slot = (slot + 1) % set_size;
+ if (slot == start_slot) {
+ return -1;
+ }
+ }
+ return -1;
+}
+
+template<typename T>
+__device__ void init_buffer(T init_value, T *buffer, int buffer_size, int num_threads, int thread_id) {
+ __syncthreads();
+ for (int i = 0; i < buffer_size; i = i + num_threads) {
+ int offset_idx = i + thread_id;
+ if (offset_idx < buffer_size) {
+ buffer[offset_idx] = init_value;
+ }
+ }
+ __syncthreads();
+}
+
+template<typename T>
+__device__ void copy_data(T *src_pt, T *dist_pt, int data_length, int num_threads, int thread_id) {
+ __syncthreads();
+ for (int i = 0; i < data_length; i = i + num_threads) {
+ int offset_idx = i + thread_id;
+ if (offset_idx < data_length) {
+ dist_pt[offset_idx] = src_pt[offset_idx];
+ }
+ }
+ __syncthreads();
+}
+
+template<typename T>
+__device__ void init_buffer_nonblocking(T init_value, T *buffer, int buffer_size, int num_threads, int thread_id) {
+ for (int i = 0; i < buffer_size; i = i + num_threads) {
+ int offset_idx = i + thread_id;
+ if (offset_idx < buffer_size) {
+ buffer[offset_idx] = init_value;
+ }
+ }
+}
+
+template<typename T>
+__device__ void copy_data_nonblocking(T *src_pt, T *dist_pt, int data_length, int num_threads, int thread_id) {
+ for (int i = 0; i < data_length; i = i + num_threads) {
+ int offset_idx = i + thread_id;
+ if (offset_idx < data_length) {
+ dist_pt[offset_idx] = src_pt[offset_idx];
+ }
+ }
+}
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/kernels/yoso/fast_lsh_cumulation.cu b/llmeval-env/lib/python3.10/site-packages/transformers/kernels/yoso/fast_lsh_cumulation.cu
new file mode 100644
index 0000000000000000000000000000000000000000..c6b13e6cb5f53c9c62e51d2c399a14d14dab7037
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/kernels/yoso/fast_lsh_cumulation.cu
@@ -0,0 +1,588 @@
+// File from https://github.com/mlpen/YOSO/blob/main/encoders/backbones/efficient_attentions/yoso/yoso_v1/cuda/fast_lsh_cumulation.cu
+
+#include <torch/extension.h>
+#include <ATen/ATen.h>
+#include "fast_lsh_cumulation.h"
+#include "fast_lsh_cumulation_cuda.h"
+#include "common_cuda.h"
+#include "common.h"
+#include <vector>
+//////////////////////////////////////////////////////////////////////////////////////////////////
+//////////////////////////////////////////////////////////////////////////////////////////////////
+
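+// Host wrapper for the LSH hashing step: draws random +/-1 diagonal matrices (Dmat),
+// then launches fast_hash_ver1_cuda_kernel once for the queries and once for the keys
+// to fill the integer hash-code tensors returned to Python.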
+std::vector<at::Tensor> fast_hash_ver1_kernel(
+ at::Tensor query_mask,
+ at::Tensor query_vector,
+ at::Tensor key_mask,
+ at::Tensor key_vector,
+ int num_hash_f,
+ int hash_code_len,
+ bool use_cuda
+) {
+
+ int batch_size = query_vector.size(0);
+ int num_query = query_vector.size(1);
+ int num_key = key_vector.size(1);
+ int vector_dim = query_vector.size(2);
+
+ int num_hash_per_part = vector_dim / hash_code_len;
+ int num_part = max(1, ceil_divide(num_hash_f, num_hash_per_part));
+
+ at::Tensor Dmat = 2 * at::randint(0, 2, {batch_size, 3, num_part, vector_dim}, query_mask.options()) - 1;
+ at::Tensor query_hash_code = at::zeros({batch_size, num_query, num_hash_f}, query_mask.options());
+ at::Tensor key_hash_code = at::zeros({batch_size, num_key, num_hash_f}, key_mask.options());
+
+ int *query_mask_ptr = query_mask.data_ptr<int>();
+ float *query_vector_ptr = query_vector.data_ptr<float>();
+ int *key_mask_ptr = key_mask.data_ptr<int>();
+ float *key_vector_ptr = key_vector.data_ptr<float>();
+
+ int *Dmat_ptr = Dmat.data_ptr<int>();
+
+ int *query_hash_code_ptr = query_hash_code.data_ptr<int>();
+ int *key_hash_code_ptr = key_hash_code.data_ptr<int>();
+
+ if (use_cuda) {
+ {
+ dim3 threads(vector_dim);
+ dim3 blocks(num_part, num_query, batch_size);
+ int shared_mem = vector_dim * sizeof(float);
+ fast_hash_ver1_cuda_kernel<<<blocks, threads, shared_mem>>>(
+ query_mask_ptr,
+ query_vector_ptr,
+ Dmat_ptr,
+ query_hash_code_ptr,
+ batch_size,
+ num_query,
+ vector_dim,
+ num_part,
+ num_hash_f,
+ hash_code_len
+ );
+ }
+ {
+ dim3 threads(vector_dim);
+ dim3 blocks(num_part, num_key, batch_size);
+ int shared_mem = vector_dim * sizeof(float);
+ fast_hash_ver1_cuda_kernel<<<blocks, threads, shared_mem>>>(
+ key_mask_ptr,
+ key_vector_ptr,
+ Dmat_ptr,
+ key_hash_code_ptr,
+ batch_size,
+ num_key,
+ vector_dim,
+ num_part,
+ num_hash_f,
+ hash_code_len
+ );
+ }
+ }
+
+ return {query_hash_code, key_hash_code};
+
+}
+
+at::Tensor lsh_cumulation_ver1_kernel(
+ at::Tensor query_mask,
+ at::Tensor query_hash_code,
+ at::Tensor key_mask,
+ at::Tensor key_hash_code,
+ at::Tensor value,
+ int hashtable_capacity,
+ bool use_cuda
+) {
+
+ int batch_size = query_hash_code.size(0);
+ int num_hash_f = query_hash_code.size(2);
+
+ int num_query = query_hash_code.size(1);
+ int num_key = key_hash_code.size(1);
+ int value_dim = value.size(2);
+
+ at::Tensor hashtable_value = at::empty({batch_size, num_hash_f, hashtable_capacity, WARP_SIZE}, value.options());
+ at::Tensor cumulation_value = at::zeros({batch_size, num_query, value_dim}, value.options());
+
+ if (use_cuda) {
+ int threads_x = WARP_SIZE;
+ int threads_y = OPTIMAL_THREADS_PER_BLOCK / WARP_SIZE;
+ int block_x_step1 = num_key / threads_y;
+ int block_x_step2 = num_query / threads_y;
+ int block_y = batch_size;
+
+ dim3 threads(threads_x, threads_y);
+ dim3 blocks_step1(block_x_step1, block_y);
+ dim3 blocks_step2(block_x_step2, block_y);
+
+ int *query_mask_ptr = query_mask.data_ptr<int>();
+ int *query_hash_code_ptr = query_hash_code.data_ptr<int>();
+ int *key_mask_ptr = key_mask.data_ptr<int>();
+ int *key_hash_code_ptr = key_hash_code.data_ptr<int>();
+ float *value_ptr = value.data_ptr<float>();
+ float *hashtable_value_ptr = hashtable_value.data_ptr<float>();
+ float *cumulation_value_ptr = cumulation_value.data_ptr<float>();
+
+ for (int value_offset = 0; value_offset < value_dim; value_offset = value_offset + WARP_SIZE) {
+
+ cudaMemset(hashtable_value_ptr, 0, (batch_size * num_hash_f * hashtable_capacity * WARP_SIZE) * sizeof(float));
+
+ lsh_cumulation_ver1_step1_cuda_kernel<<<blocks_step1, threads>>>(
+ key_mask_ptr,
+ key_hash_code_ptr,
+ value_ptr,
+ hashtable_value_ptr,
+ batch_size,
+ num_hash_f,
+ hashtable_capacity,
+ num_key,
+ value_dim,
+ value_offset
+ );
+
+ lsh_cumulation_ver1_step2_cuda_kernel<<<blocks_step2, threads>>>(
+ query_mask_ptr,
+ query_hash_code_ptr,
+ hashtable_value_ptr,
+ cumulation_value_ptr,
+ batch_size,
+ num_hash_f,
+ hashtable_capacity,
+ num_query,
+ value_dim,
+ value_offset
+ );
+ }
+
+ }
+
+ return cumulation_value;
+
+}
+
+at::Tensor lsh_weighted_cumulation_ver1_kernel(
+ at::Tensor query_mask,
+ at::Tensor query_hash_code,
+ at::Tensor query_weight,
+ at::Tensor key_mask,
+ at::Tensor key_hash_code,
+ at::Tensor key_weight,
+ at::Tensor value,
+ int hashtable_capacity,
+ bool use_cuda
+) {
+
+ int batch_size = query_hash_code.size(0);
+ int num_hash_f = query_hash_code.size(2);
+
+ int num_query = query_hash_code.size(1);
+ int num_key = key_hash_code.size(1);
+ int value_dim = value.size(2);
+ int weight_dim = query_weight.size(2);
+
+ at::Tensor hashtable_value = at::zeros({batch_size, num_hash_f, hashtable_capacity, WARP_SIZE}, value.options());
+ at::Tensor cumulation_value = at::zeros({batch_size, num_query, value_dim}, value.options());
+
+ if (use_cuda) {
+ int threads_x = WARP_SIZE;
+ int threads_y = OPTIMAL_THREADS_PER_BLOCK / WARP_SIZE;
+ int block_x_step1 = num_key / threads_y;
+ int block_x_step2 = num_query / threads_y;
+ int block_y = batch_size;
+
+ dim3 threads(threads_x, threads_y);
+ dim3 blocks_step1(block_x_step1, block_y);
+ dim3 blocks_step2(block_x_step2, block_y);
+
+ int *query_mask_ptr = query_mask.data_ptr<int>();
+ int *query_hash_code_ptr = query_hash_code.data_ptr<int>();
+ float *query_weight_ptr = query_weight.data_ptr<float>();
+ int *key_mask_ptr = key_mask.data_ptr<int>();
+ int *key_hash_code_ptr = key_hash_code.data_ptr<int>();
+ float *key_weight_ptr = key_weight.data_ptr<float>();
+ float *value_ptr = value.data_ptr<float>();
+ float *hashtable_value_ptr = hashtable_value.data_ptr<float>();
+ float *cumulation_value_ptr = cumulation_value.data_ptr<float>();
+
+ for (int value_offset = 0; value_offset < value_dim; value_offset = value_offset + WARP_SIZE) {
+ for (int weight_idx = 0; weight_idx < weight_dim; weight_idx++) {
+
+ cudaMemset(hashtable_value_ptr, 0, (batch_size * num_hash_f * hashtable_capacity * WARP_SIZE) * sizeof(float));
+
+ lsh_weighted_cumulation_ver1_step1_cuda_kernel<<<blocks_step1, threads>>>(
+ key_mask_ptr,
+ key_hash_code_ptr,
+ key_weight_ptr,
+ value_ptr,
+ hashtable_value_ptr,
+ batch_size,
+ num_hash_f,
+ hashtable_capacity,
+ num_key,
+ value_dim,
+ weight_dim,
+ value_offset,
+ weight_idx
+ );
+
+ lsh_weighted_cumulation_ver1_step2_cuda_kernel<<<blocks_step2, threads>>>(
+ query_mask_ptr,
+ query_hash_code_ptr,
+ query_weight_ptr,
+ hashtable_value_ptr,
+ cumulation_value_ptr,
+ batch_size,
+ num_hash_f,
+ hashtable_capacity,
+ num_query,
+ value_dim,
+ weight_dim,
+ value_offset,
+ weight_idx
+ );
+ }
+ }
+
+ }
+
+ return cumulation_value;
+
+}
+
+at::Tensor lsh_weighted_cumulation_ver2_kernel(
+ at::Tensor query_mask,
+ at::Tensor query_hash_code,
+ at::Tensor query_weight,
+ at::Tensor key_mask,
+ at::Tensor key_hash_code,
+ at::Tensor key_weight,
+ at::Tensor value,
+ int hashtable_capacity,
+ bool use_cuda
+) {
+
+ int batch_size = query_hash_code.size(0);
+ int num_hash_f = query_hash_code.size(2);
+
+ int num_query = query_hash_code.size(1);
+ int num_key = key_hash_code.size(1);
+ int value_dim = value.size(2);
+ int weight_dim = query_weight.size(2);
+
+ at::Tensor count_sort_table = at::zeros({batch_size, num_hash_f, hashtable_capacity}, query_hash_code.options());
+ at::Tensor key_sorted_idxes = at::zeros({batch_size, num_hash_f, num_key}, query_hash_code.options());
+ at::Tensor query_info = at::zeros({batch_size, num_query, 2, num_hash_f}, query_hash_code.options());
+ at::Tensor cumulation_value = at::zeros({batch_size, num_query, value_dim}, value.options());
+
+ if (use_cuda) {
+
+ int *query_mask_ptr = query_mask.data_ptr<int>();
+ int *query_hash_code_ptr = query_hash_code.data_ptr<int>();
+ float *query_weight_ptr = query_weight.data_ptr<float>();
+ int *key_mask_ptr = key_mask.data_ptr<int>();
+ int *key_hash_code_ptr = key_hash_code.data_ptr<int>();
+ float *key_weight_ptr = key_weight.data_ptr<float>();
+ float *value_ptr = value.data_ptr<float>();
+
+ int *count_sort_table_ptr = count_sort_table.data_ptr<int>();
+ int *key_sorted_idxes_ptr = key_sorted_idxes.data_ptr<int>();
+ int *query_info_ptr = query_info.data_ptr<int>();
+
+ float *cumulation_value_ptr = cumulation_value.data_ptr<float>();
+
+ {
+ dim3 threads_step13(num_hash_f, max(1, OPTIMAL_THREADS_PER_BLOCK / num_hash_f));
+ dim3 blocks_step13(num_key / max(1, OPTIMAL_THREADS_PER_BLOCK / num_hash_f), batch_size);
+ dim3 threads_step2(min(hashtable_capacity, OPTIMAL_THREADS_PER_BLOCK));
+ dim3 blocks_step2(num_hash_f, batch_size);
+ int shared_mem = hashtable_capacity * sizeof(float);
+ count_sort_step1_cuda_kernel<<<blocks_step13, threads_step13>>>(
+ key_mask_ptr,
+ key_hash_code_ptr,
+ count_sort_table_ptr,
+ batch_size,
+ num_hash_f,
+ hashtable_capacity,
+ num_key
+ );
+ count_sort_step2_cuda_kernel<<<blocks_step2, threads_step2, shared_mem>>>(
+ count_sort_table_ptr,
+ batch_size,
+ num_hash_f,
+ hashtable_capacity
+ );
+ count_sort_step3_cuda_kernel<<<blocks_step13, threads_step13>>>(
+ key_mask_ptr,
+ key_hash_code_ptr,
+ count_sort_table_ptr,
+ key_sorted_idxes_ptr,
+ batch_size,
+ num_hash_f,
+ hashtable_capacity,
+ num_key
+ );
+ }
+ {
+ dim3 threads(num_hash_f, max(1, OPTIMAL_THREADS_PER_BLOCK / num_hash_f));
+ dim3 blocks(num_query / max(1, OPTIMAL_THREADS_PER_BLOCK / num_hash_f), batch_size);
+ extract_query_info_cuda_kernel<<<blocks, threads>>>(
+ query_mask_ptr,
+ query_hash_code_ptr,
+ count_sort_table_ptr,
+ query_info_ptr,
+ batch_size,
+ num_hash_f,
+ hashtable_capacity,
+ num_query
+ );
+ }
+ {
+ dim3 threads(WARP_SIZE, OPTIMAL_THREADS_PER_BLOCK / WARP_SIZE);
+ dim3 blocks(num_query, num_hash_f, batch_size);
+ int shared_mem = (weight_dim + WARP_SIZE) * sizeof(float);
+ lsh_weighted_cumulation_ver2_step2_cuda_kernel<<<blocks, threads, shared_mem>>>(
+ query_mask_ptr,
+ query_info_ptr,
+ key_sorted_idxes_ptr,
+ query_weight_ptr,
+ key_weight_ptr,
+ value_ptr,
+ cumulation_value_ptr,
+ batch_size,
+ num_hash_f,
+ num_query,
+ num_key,
+ value_dim,
+ weight_dim
+ );
+ }
+ }
+
+ return cumulation_value;
+
+}
+
+at::Tensor lsh_weighted_cumulation_ver3_kernel(
+ at::Tensor query_mask,
+ at::Tensor query_hash_code,
+ at::Tensor query_weight,
+ at::Tensor key_mask,
+ at::Tensor key_hash_code,
+ at::Tensor key_weight,
+ at::Tensor value,
+ int hashtable_capacity,
+ bool use_cuda
+) {
+
+ int batch_size = query_hash_code.size(0);
+ int num_hash_f = query_hash_code.size(2);
+
+ int num_query = query_hash_code.size(1);
+ int num_key = key_hash_code.size(1);
+ int value_dim = value.size(2);
+ int weight_dim = query_weight.size(2);
+
+ at::Tensor count_sort_table = at::zeros({batch_size, num_hash_f, hashtable_capacity}, query_hash_code.options());
+ at::Tensor query_sorted_idxes = at::zeros({batch_size, num_hash_f, num_query}, query_hash_code.options());
+ at::Tensor key_info = at::zeros({batch_size, num_key, 2, num_hash_f}, query_hash_code.options());
+ at::Tensor cumulation_value = at::zeros({batch_size, num_query, value_dim}, value.options());
+
+ if (use_cuda) {
+
+ int *query_mask_ptr = query_mask.data_ptr<int>();
+ int *query_hash_code_ptr = query_hash_code.data_ptr<int>();
+ float *query_weight_ptr = query_weight.data_ptr<float>();
+ int *key_mask_ptr = key_mask.data_ptr<int>();
+ int *key_hash_code_ptr = key_hash_code.data_ptr<int>();
+ float *key_weight_ptr = key_weight.data_ptr<float>();
+ float *value_ptr = value.data_ptr<float>();
+
+ int *count_sort_table_ptr = count_sort_table.data_ptr<int>();
+ int *query_sorted_idxes_ptr = query_sorted_idxes.data_ptr<int>();
+ int *key_info_ptr = key_info.data_ptr<int>();
+
+ float *cumulation_value_ptr = cumulation_value.data_ptr<float>();
+
+ {
+ dim3 threads_step13(num_hash_f, max(1, OPTIMAL_THREADS_PER_BLOCK / num_hash_f));
+ dim3 blocks_step13(num_query / max(1, OPTIMAL_THREADS_PER_BLOCK / num_hash_f), batch_size);
+ dim3 threads_step2(min(hashtable_capacity, OPTIMAL_THREADS_PER_BLOCK));
+ dim3 blocks_step2(num_hash_f, batch_size);
+ int shared_mem = hashtable_capacity * sizeof(float);
+ count_sort_step1_cuda_kernel<<<blocks_step13, threads_step13>>>(
+ query_mask_ptr,
+ query_hash_code_ptr,
+ count_sort_table_ptr,
+ batch_size,
+ num_hash_f,
+ hashtable_capacity,
+ num_query
+ );
+ count_sort_step2_cuda_kernel<<<blocks_step2, threads_step2, shared_mem>>>(
+ count_sort_table_ptr,
+ batch_size,
+ num_hash_f,
+ hashtable_capacity
+ );
+ count_sort_step3_cuda_kernel<<<blocks_step13, threads_step13>>>(
+ query_mask_ptr,
+ query_hash_code_ptr,
+ count_sort_table_ptr,
+ query_sorted_idxes_ptr,
+ batch_size,
+ num_hash_f,
+ hashtable_capacity,
+ num_query
+ );
+ }
+ {
+ dim3 threads(num_hash_f, max(1, OPTIMAL_THREADS_PER_BLOCK / num_hash_f));
+ dim3 blocks(num_key / max(1, OPTIMAL_THREADS_PER_BLOCK / num_hash_f), batch_size);
+ extract_query_info_cuda_kernel<<<blocks, threads>>>(
+ key_mask_ptr,
+ key_hash_code_ptr,
+ count_sort_table_ptr,
+ key_info_ptr,
+ batch_size,
+ num_hash_f,
+ hashtable_capacity,
+ num_key
+ );
+ }
+ {
+ dim3 threads(WARP_SIZE, OPTIMAL_THREADS_PER_BLOCK / WARP_SIZE);
+ dim3 blocks(num_key, num_hash_f, batch_size);
+ int shared_mem = (weight_dim + value_dim + WARP_SIZE) * sizeof(float);
+ lsh_weighted_cumulation_ver3_step2_cuda_kernel<<<blocks, threads, shared_mem>>>(
+ query_sorted_idxes_ptr,
+ key_mask_ptr,
+ key_info_ptr,
+ query_weight_ptr,
+ key_weight_ptr,
+ value_ptr,
+ cumulation_value_ptr,
+ batch_size,
+ num_hash_f,
+ num_query,
+ num_key,
+ value_dim,
+ weight_dim
+ );
+ }
+ }
+
+ return cumulation_value;
+
+}
+
+at::Tensor lsh_weighted_cumulation_ver4_kernel(
+ at::Tensor query_mask,
+ at::Tensor query_hash_code,
+ at::Tensor query_weight,
+ at::Tensor key_mask,
+ at::Tensor key_hash_code,
+ at::Tensor key_weight,
+ at::Tensor value,
+ int hashtable_capacity,
+ bool use_cuda
+) {
+
+ int batch_size = query_hash_code.size(0);
+ int num_hash_f = query_hash_code.size(2);
+
+ int num_query = query_hash_code.size(1);
+ int num_key = key_hash_code.size(1);
+ int value_dim = value.size(2);
+ int weight_dim = query_weight.size(2);
+
+ at::Tensor count_sort_table = at::zeros({batch_size, num_hash_f, hashtable_capacity}, query_hash_code.options());
+ at::Tensor query_sorted_idxes = at::zeros({batch_size, num_hash_f, num_query}, query_hash_code.options());
+ at::Tensor key_info = at::zeros({batch_size, num_key, 2, num_hash_f}, query_hash_code.options());
+ at::Tensor cumulation_value = at::zeros({batch_size, num_query, value_dim}, value.options());
+
+ if (use_cuda) {
+
+ int *query_mask_ptr = query_mask.data_ptr<int>();
+ int *query_hash_code_ptr = query_hash_code.data_ptr<int>();
+ float *query_weight_ptr = query_weight.data_ptr<float>();
+ int *key_mask_ptr = key_mask.data_ptr<int>();
+ int *key_hash_code_ptr = key_hash_code.data_ptr<int>();
+ float *key_weight_ptr = key_weight.data_ptr<float>();
+ float *value_ptr = value.data_ptr<float>();
+
+ int *count_sort_table_ptr = count_sort_table.data_ptr<int>();
+ int *query_sorted_idxes_ptr = query_sorted_idxes.data_ptr<int>();
+ int *key_info_ptr = key_info.data_ptr<int>();
+
+ float *cumulation_value_ptr = cumulation_value.data_ptr<float>();
+
+ {
+ dim3 threads_step13(num_hash_f, max(1, OPTIMAL_THREADS_PER_BLOCK / num_hash_f));
+ dim3 blocks_step13(num_query / max(1, OPTIMAL_THREADS_PER_BLOCK / num_hash_f), batch_size);
+ dim3 threads_step2(min(hashtable_capacity, OPTIMAL_THREADS_PER_BLOCK));
+ dim3 blocks_step2(num_hash_f, batch_size);
+ int shared_mem = hashtable_capacity * sizeof(float);
+ count_sort_step1_cuda_kernel<<<blocks_step13, threads_step13>>>(
+ query_mask_ptr,
+ query_hash_code_ptr,
+ count_sort_table_ptr,
+ batch_size,
+ num_hash_f,
+ hashtable_capacity,
+ num_query
+ );
+ count_sort_step2_cuda_kernel<<<blocks_step2, threads_step2, shared_mem>>>(
+ count_sort_table_ptr,
+ batch_size,
+ num_hash_f,
+ hashtable_capacity
+ );
+ count_sort_step3_cuda_kernel<<<blocks_step13, threads_step13>>>(
+ query_mask_ptr,
+ query_hash_code_ptr,
+ count_sort_table_ptr,
+ query_sorted_idxes_ptr,
+ batch_size,
+ num_hash_f,
+ hashtable_capacity,
+ num_query
+ );
+ }
+ {
+ dim3 threads(num_hash_f, max(1, OPTIMAL_THREADS_PER_BLOCK / num_hash_f));
+ dim3 blocks(num_key / max(1, OPTIMAL_THREADS_PER_BLOCK / num_hash_f), batch_size);
+ extract_query_info_cuda_kernel<<<blocks, threads>>>(
+ key_mask_ptr,
+ key_hash_code_ptr,
+ count_sort_table_ptr,
+ key_info_ptr,
+ batch_size,
+ num_hash_f,
+ hashtable_capacity,
+ num_key
+ );
+ }
+ {
+ dim3 threads(WARP_SIZE, OPTIMAL_THREADS_PER_BLOCK / WARP_SIZE);
+ dim3 blocks(num_key, batch_size);
+ int shared_mem = (weight_dim + value_dim + 2 * num_hash_f) * sizeof(float);
+ lsh_weighted_cumulation_ver4_step2_cuda_kernel<<<blocks, threads, shared_mem>>>(
+ query_sorted_idxes_ptr,
+ key_mask_ptr,
+ key_info_ptr,
+ query_weight_ptr,
+ key_weight_ptr,
+ value_ptr,
+ cumulation_value_ptr,
+ batch_size,
+ num_hash_f,
+ num_query,
+ num_key,
+ value_dim,
+ weight_dim
+ );
+ }
+ }
+
+ return cumulation_value;
+
+}
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/kernels/yoso/fast_lsh_cumulation.h b/llmeval-env/lib/python3.10/site-packages/transformers/kernels/yoso/fast_lsh_cumulation.h
new file mode 100644
index 0000000000000000000000000000000000000000..dd48de0ed159f49ee3afe93b12aaae719fe87688
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/kernels/yoso/fast_lsh_cumulation.h
@@ -0,0 +1,71 @@
+#include <torch/extension.h>
+#include <ATen/ATen.h>
+#include <vector>
+
+std::vector<at::Tensor> fast_hash_ver1_kernel(
+ at::Tensor query_mask,
+ at::Tensor query_vector,
+ at::Tensor key_mask,
+ at::Tensor key_vector,
+ int num_hash_f,
+ int hash_code_len,
+ bool use_cuda
+);
+
+at::Tensor lsh_cumulation_ver1_kernel(
+ at::Tensor query_mask,
+ at::Tensor query_hash_code,
+ at::Tensor key_mask,
+ at::Tensor key_hash_code,
+ at::Tensor value,
+ int hashtable_capacity,
+ bool use_cuda
+);
+
+at::Tensor lsh_weighted_cumulation_ver1_kernel(
+ at::Tensor query_mask,
+ at::Tensor query_hash_code,
+ at::Tensor query_weight,
+ at::Tensor key_mask,
+ at::Tensor key_hash_code,
+ at::Tensor key_weight,
+ at::Tensor value,
+ int hashtable_capacity,
+ bool use_cuda
+);
+
+at::Tensor lsh_weighted_cumulation_ver2_kernel(
+ at::Tensor query_mask,
+ at::Tensor query_hash_code,
+ at::Tensor query_weight,
+ at::Tensor key_mask,
+ at::Tensor key_hash_code,
+ at::Tensor key_weight,
+ at::Tensor value,
+ int hashtable_capacity,
+ bool use_cuda
+);
+
+at::Tensor lsh_weighted_cumulation_ver3_kernel(
+ at::Tensor query_mask,
+ at::Tensor query_hash_code,
+ at::Tensor query_weight,
+ at::Tensor key_mask,
+ at::Tensor key_hash_code,
+ at::Tensor key_weight,
+ at::Tensor value,
+ int hashtable_capacity,
+ bool use_cuda
+);
+
+at::Tensor lsh_weighted_cumulation_ver4_kernel(
+ at::Tensor query_mask,
+ at::Tensor query_hash_code,
+ at::Tensor query_weight,
+ at::Tensor key_mask,
+ at::Tensor key_hash_code,
+ at::Tensor key_weight,
+ at::Tensor value,
+ int hashtable_capacity,
+ bool use_cuda
+);
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/kernels/yoso/fast_lsh_cumulation_cuda.cu b/llmeval-env/lib/python3.10/site-packages/transformers/kernels/yoso/fast_lsh_cumulation_cuda.cu
new file mode 100644
index 0000000000000000000000000000000000000000..ebc6260dd6db3ecaf8cb7b35c3c1a6e1ab3851dc
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/kernels/yoso/fast_lsh_cumulation_cuda.cu
@@ -0,0 +1,825 @@
+// File from https://github.com/mlpen/YOSO/blob/main/encoders/backbones/efficient_attentions/yoso/yoso_v1/cuda/fast_lsh_cumulation_cuda.cu
+
+#include "fast_lsh_cumulation_cuda.h"
+#include "common_cuda_device.h"
+#include "common_cuda.h"
+#include "common.h"
+#include <math.h>
+//////////////////////////////////////////////////////////////////////////////////////////////////
+//////////////////////////////////////////////////////////////////////////////////////////////////
+
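+// In-place fast Hadamard transform over a shared-memory vector: butterfly stages wider
+// than a warp go through shared memory with __syncthreads(); the last log2(WARP_SIZE)
+// stages are done with register shuffles (__shfl_xor_sync) inside each warp.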
+inline __device__ void fast_hadamard_transform(float *vector_buffer, int vector_dim, int dim_idx) {
+ int stride = vector_dim / 2;
+ while (stride > (WARP_SIZE / 2)) {
+ __syncthreads();
+ int sign = 1 - ((dim_idx / stride) % 2) * 2;
+ float val1 = vector_buffer[dim_idx];
+ float val2 = vector_buffer[dim_idx + sign * stride];
+ __syncthreads();
+ vector_buffer[dim_idx] = float(sign) * val1 + val2;
+ stride = stride / 2;
+ }
+
+ float val = vector_buffer[dim_idx];
+ #pragma unroll
+ for (stride = (WARP_SIZE / 2); stride > 0; stride = stride / 2) {
+ int sign = 1 - ((dim_idx / stride) % 2) * 2;
+ val = float(sign) * val + __shfl_xor_sync(FULL_MASK, val, stride);
+ }
+ vector_buffer[dim_idx] = val;
+}
+
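+// One block per (part, vector, batch) triple, one thread per vector dimension. The
+// vector is multiplied by three random +/-1 diagonals with a Hadamard transform after
+// each one (an SRHT-style random rotation); the sign pattern of the result is then
+// packed into hash_code_len-bit integer hash codes.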
+__global__ void fast_hash_ver1_cuda_kernel(
+ int *mask, // [batch_size, num_vector]
+ float *vector, // [batch_size, num_vector, vector_dim]
+ int *Dmat, // [batch_size, 3, num_part, vector_dim]
+ int *hash_code, // [batch_size, num_vector, num_hash_f]
+ int batch_size,
+ int num_vector,
+ int vector_dim,
+ int num_part,
+ int num_hash_f,
+ int hash_code_len
+) {
+
+ int batch_idx = blockIdx.z;
+ int vector_idx = blockIdx.y;
+ int part_idx = blockIdx.x;
+
+ int dim_idx = threadIdx.x;
+
+ int batch_idx__vector_idx = batch_idx * num_vector + vector_idx;
+ if (mask[batch_idx__vector_idx] == 0) {
+ return;
+ }
+
+ extern __shared__ float buffer[];
+ float *vector_buffer = buffer;
+
+ vector_buffer[dim_idx] = vector[batch_idx__vector_idx * vector_dim + dim_idx];
+
+ vector_buffer[dim_idx] = vector_buffer[dim_idx] * (float)Dmat[((batch_idx * 3 + 0) * num_part + part_idx) * vector_dim + dim_idx];
+ fast_hadamard_transform(vector_buffer, vector_dim, dim_idx);
+ vector_buffer[dim_idx] = vector_buffer[dim_idx] * (float)Dmat[((batch_idx * 3 + 1) * num_part + part_idx) * vector_dim + dim_idx];
+ fast_hadamard_transform(vector_buffer, vector_dim, dim_idx);
+ vector_buffer[dim_idx] = vector_buffer[dim_idx] * (float)Dmat[((batch_idx * 3 + 2) * num_part + part_idx) * vector_dim + dim_idx];
+ fast_hadamard_transform(vector_buffer, vector_dim, dim_idx);
+
+ int num_hash_per_part = vector_dim / hash_code_len;
+ if (hash_code_len == 8 || hash_code_len == 16) {
+ int code = select(vector_buffer[dim_idx] > 0, 1 << (dim_idx % hash_code_len), 0);
+ for (int offset = 1; offset < hash_code_len; offset = offset * 2) {
+ code += __shfl_xor_sync(FULL_MASK, code, offset);
+ }
+ if (dim_idx % hash_code_len == 0) {
+ int hash_f_idx = part_idx * num_hash_per_part + dim_idx / hash_code_len;
+ if (hash_f_idx < num_hash_f) {
+ hash_code[batch_idx__vector_idx * num_hash_f + hash_f_idx] = code;
+ }
+ }
+ } else {
+ vector_buffer[dim_idx] = select(vector_buffer[dim_idx] > 0, 1 << (dim_idx % hash_code_len), 0);
+ __syncthreads();
+ if (dim_idx < num_hash_per_part) {
+ int code = 0;
+ for (int i = 0; i < hash_code_len; i++) {
+ code += vector_buffer[dim_idx * hash_code_len + i];
+ }
+ int hash_f_idx = part_idx * num_hash_per_part + dim_idx;
+ if (hash_f_idx < num_hash_f) {
+ hash_code[batch_idx__vector_idx * num_hash_f + hash_f_idx] = code;
+ }
+ }
+ }
+}
+
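+// Unweighted LSH cumulation, done in two steps per 32-wide slice of the value dimension:
+// step 1 scatters each key's value slice into a per-hash-function hash table with
+// atomicAdd; step 2 gathers the table entries addressed by each query's hash codes and
+// averages them over the num_hash_f hash functions.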
+__global__ void lsh_cumulation_ver1_step1_cuda_kernel(
+ int *key_mask, // [batch_size, num_key]
+ int *key_hash_code, // [batch_size, num_key, num_hash_f]
+ float *value, // [batch_size, num_key, value_dim]
+ float *hashtable_value, // [batch_size, num_hash_f, hashtable_capacity, WARP_SIZE]
+ int batch_size,
+ int num_hash_f,
+ int hashtable_capacity,
+ int num_key,
+ int value_dim,
+ int offset_warp
+) {
+
+ int warp_thread_idx = threadIdx.x;
+
+ int batch_idx = blockIdx.y;
+ int key_idx = blockIdx.x * blockDim.y + threadIdx.y;
+
+ int batch_idx__key_idx = batch_idx * num_key + key_idx;
+ if (key_mask[batch_idx__key_idx] == 0) {
+ return;
+ }
+
+ if (num_hash_f > WARP_SIZE) {
+ float warp_value = value[batch_idx__key_idx * value_dim + offset_warp + warp_thread_idx];
+ for (int hash_f_start = 0; hash_f_start < num_hash_f; hash_f_start = hash_f_start + WARP_SIZE) {
+ int warp_hashcode = key_hash_code[batch_idx__key_idx * num_hash_f + hash_f_start + warp_thread_idx];
+ #pragma unroll
+ for (int hash_f_offset = 0; hash_f_offset < WARP_SIZE; hash_f_offset++) {
+ int current_hashcode = warp_hashcode;
+ current_hashcode = __shfl_sync(FULL_MASK, current_hashcode, hash_f_offset);
+ int hashtable_idx = (batch_idx * num_hash_f + (hash_f_start + hash_f_offset)) * hashtable_capacity + current_hashcode;
+ atomicAdd(&hashtable_value[hashtable_idx * WARP_SIZE + warp_thread_idx], warp_value);
+ }
+ }
+ } else {
+ float warp_value = value[batch_idx__key_idx * value_dim + offset_warp + warp_thread_idx];
+ int warp_hashcode = 0;
+ if (warp_thread_idx < num_hash_f) {
+ warp_hashcode = key_hash_code[batch_idx__key_idx * num_hash_f + warp_thread_idx];
+ }
+ for (int hash_f_idx = 0; hash_f_idx < num_hash_f; hash_f_idx++) {
+ int current_hashcode = warp_hashcode;
+ current_hashcode = __shfl_sync(FULL_MASK, current_hashcode, hash_f_idx);
+ int hashtable_idx = (batch_idx * num_hash_f + hash_f_idx) * hashtable_capacity + current_hashcode;
+ atomicAdd(&hashtable_value[hashtable_idx * WARP_SIZE + warp_thread_idx], warp_value);
+ }
+ }
+
+}
+
+__global__ void lsh_cumulation_ver1_step2_cuda_kernel(
+ int *query_mask, // [batch_size, num_query]
+ int *query_hash_code, // [batch_size, num_query, num_hash_f]
+ float *hashtable_value, // [batch_size, num_hash_f, hashtable_capacity, WARP_SIZE]
+ float *cumulation_value, // [batch_size, num_query, value_dim]
+ int batch_size,
+ int num_hash_f,
+ int hashtable_capacity,
+ int num_query,
+ int value_dim,
+ int offset_warp
+) {
+
+ int warp_thread_idx = threadIdx.x;
+
+ int batch_idx = blockIdx.y;
+ int query_idx = blockIdx.x * blockDim.y + threadIdx.y;
+
+ int batch_idx__query_idx = batch_idx * num_query + query_idx;
+ if (query_mask[batch_idx__query_idx] == 0) {
+ return;
+ }
+
+ if (num_hash_f > WARP_SIZE) {
+ float warp_value = 0;
+ for (int hash_f_start = 0; hash_f_start < num_hash_f; hash_f_start = hash_f_start + WARP_SIZE) {
+ int warp_hashcode = query_hash_code[batch_idx__query_idx * num_hash_f + hash_f_start + warp_thread_idx];
+ #pragma unroll
+ for (int hash_f_offset = 0; hash_f_offset < WARP_SIZE; hash_f_offset++) {
+ int current_hashcode = warp_hashcode;
+ current_hashcode = __shfl_sync(FULL_MASK, current_hashcode, hash_f_offset);
+ int hashtable_idx = (batch_idx * num_hash_f + (hash_f_start + hash_f_offset)) * hashtable_capacity + current_hashcode;
+ warp_value = warp_value + hashtable_value[hashtable_idx * WARP_SIZE + warp_thread_idx];
+ }
+ }
+ cumulation_value[batch_idx__query_idx * value_dim + offset_warp + warp_thread_idx] = warp_value / float(num_hash_f);
+ } else {
+ float warp_value = 0;
+ int warp_hashcode = 0;
+ if (warp_thread_idx < num_hash_f) {
+ warp_hashcode = query_hash_code[batch_idx__query_idx * num_hash_f + warp_thread_idx];
+ }
+ for (int hash_f_idx = 0; hash_f_idx < num_hash_f; hash_f_idx++) {
+ int current_hashcode = warp_hashcode;
+ current_hashcode = __shfl_sync(FULL_MASK, current_hashcode, hash_f_idx);
+ int hashtable_idx = (batch_idx * num_hash_f + hash_f_idx) * hashtable_capacity + current_hashcode;
+ warp_value = warp_value + hashtable_value[hashtable_idx * WARP_SIZE + warp_thread_idx];
+ }
+ cumulation_value[batch_idx__query_idx * value_dim + offset_warp + warp_thread_idx] = warp_value / float(num_hash_f);
+ }
+
+}
+
+__global__ void lsh_weighted_cumulation_ver1_step1_cuda_kernel(
+ int *key_mask, // [batch_size, num_key]
+ int *key_hash_code, // [batch_size, num_key, num_hash_f]
+ float *key_weight, // [batch_size, num_key, weight_dim]
+ float *value, // [batch_size, num_key, value_dim]
+ float *hashtable_value, // [batch_size, num_hash_f, hashtable_capacity, WARP_SIZE]
+ int batch_size,
+ int num_hash_f,
+ int hashtable_capacity,
+ int num_key,
+ int value_dim,
+ int weight_dim,
+ int offset_warp,
+ int weight_idx
+) {
+
+ int warp_thread_idx = threadIdx.x;
+
+ int batch_idx = blockIdx.y;
+ int key_idx = blockIdx.x * blockDim.y + threadIdx.y;
+
+ int batch_idx__key_idx = batch_idx * num_key + key_idx;
+ if (key_mask[batch_idx__key_idx] == 0) {
+ return;
+ }
+
+ if (num_hash_f > WARP_SIZE) {
+ float warp_value = key_weight[batch_idx__key_idx * weight_dim + weight_idx] * value[batch_idx__key_idx * value_dim + offset_warp + warp_thread_idx];
+ for (int hash_f_start = 0; hash_f_start < num_hash_f; hash_f_start = hash_f_start + WARP_SIZE) {
+ int warp_hashcode = key_hash_code[batch_idx__key_idx * num_hash_f + hash_f_start + warp_thread_idx];
+ #pragma unroll
+ for (int hash_f_offset = 0; hash_f_offset < WARP_SIZE; hash_f_offset++) {
+ int current_hashcode = warp_hashcode;
+ current_hashcode = __shfl_sync(FULL_MASK, current_hashcode, hash_f_offset);
+ int hashtable_idx = (batch_idx * num_hash_f + (hash_f_start + hash_f_offset)) * hashtable_capacity + current_hashcode;
+ atomicAdd(&hashtable_value[hashtable_idx * WARP_SIZE + warp_thread_idx], warp_value);
+ }
+ }
+ } else {
+ float warp_value = key_weight[batch_idx__key_idx * weight_dim + weight_idx] * value[batch_idx__key_idx * value_dim + offset_warp + warp_thread_idx];
+ int warp_hashcode = 0;
+ if (warp_thread_idx < num_hash_f) {
+ warp_hashcode = key_hash_code[batch_idx__key_idx * num_hash_f + warp_thread_idx];
+ }
+ for (int hash_f_idx = 0; hash_f_idx < num_hash_f; hash_f_idx++) {
+ int current_hashcode = warp_hashcode;
+ current_hashcode = __shfl_sync(FULL_MASK, current_hashcode, hash_f_idx);
+ int hashtable_idx = (batch_idx * num_hash_f + hash_f_idx) * hashtable_capacity + current_hashcode;
+ atomicAdd(&hashtable_value[hashtable_idx * WARP_SIZE + warp_thread_idx], warp_value);
+ }
+ }
+
+}
+
+__global__ void lsh_weighted_cumulation_ver1_step2_cuda_kernel(
+ int *query_mask, // [batch_size, num_query]
+ int *query_hash_code, // [batch_size, num_query, num_hash_f]
+ float *query_weight, // [batch_size, num_query, weight_dim]
+ float *hashtable_value, // [batch_size, num_hash_f, hashtable_capacity, WARP_SIZE]
+ float *cumulation_value, // [batch_size, num_query, value_dim]
+ int batch_size,
+ int num_hash_f,
+ int hashtable_capacity,
+ int num_query,
+ int value_dim,
+ int weight_dim,
+ int offset_warp,
+ int weight_idx
+) {
+
+ int warp_thread_idx = threadIdx.x;
+
+ int batch_idx = blockIdx.y;
+ int query_idx = blockIdx.x * blockDim.y + threadIdx.y;
+
+ int batch_idx__query_idx = batch_idx * num_query + query_idx;
+ if (query_mask[batch_idx__query_idx] == 0) {
+ return;
+ }
+
+ if (num_hash_f > WARP_SIZE) {
+ float warp_value = 0;
+ for (int hash_f_start = 0; hash_f_start < num_hash_f; hash_f_start = hash_f_start + WARP_SIZE) {
+ int warp_hashcode = query_hash_code[batch_idx__query_idx * num_hash_f + hash_f_start + warp_thread_idx];
+ #pragma unroll
+ for (int hash_f_offset = 0; hash_f_offset < WARP_SIZE; hash_f_offset++) {
+ int current_hashcode = warp_hashcode;
+ current_hashcode = __shfl_sync(FULL_MASK, current_hashcode, hash_f_offset);
+ int hashtable_idx = (batch_idx * num_hash_f + (hash_f_start + hash_f_offset)) * hashtable_capacity + current_hashcode;
+ warp_value = warp_value + hashtable_value[hashtable_idx * WARP_SIZE + warp_thread_idx];
+ }
+ }
+ float warp_weight = query_weight[batch_idx__query_idx * weight_dim + weight_idx];
+ cumulation_value[batch_idx__query_idx * value_dim + offset_warp + warp_thread_idx] += warp_weight * warp_value / float(num_hash_f);
+ } else {
+ float warp_value = 0;
+ int warp_hashcode = 0;
+ if (warp_thread_idx < num_hash_f) {
+ warp_hashcode = query_hash_code[batch_idx__query_idx * num_hash_f + warp_thread_idx];
+ }
+ for (int hash_f_idx = 0; hash_f_idx < num_hash_f; hash_f_idx++) {
+ int current_hashcode = warp_hashcode;
+ current_hashcode = __shfl_sync(FULL_MASK, current_hashcode, hash_f_idx);
+ int hashtable_idx = (batch_idx * num_hash_f + hash_f_idx) * hashtable_capacity + current_hashcode;
+ warp_value = warp_value + hashtable_value[hashtable_idx * WARP_SIZE + warp_thread_idx];
+ }
+ float warp_weight = query_weight[batch_idx__query_idx * weight_dim + weight_idx];
+ cumulation_value[batch_idx__query_idx * value_dim + offset_warp + warp_thread_idx] += warp_weight * warp_value / float(num_hash_f);
+ }
+
+}
+
+__global__ void count_sort_step1_cuda_kernel(
+ int *key_mask, // [batch_size, num_key]
+ int *key_hash_code, // [batch_size, num_key, num_hash_f]
+ int *count_sort_table, // [batch_size, num_hash_f, hashtable_capacity]
+ int batch_size,
+ int num_hash_f,
+ int hashtable_capacity,
+ int num_key
+) {
+
+ int batch_idx = blockIdx.y;
+ int key_idx = blockIdx.x * blockDim.y + threadIdx.y;
+ int hash_f_idx = threadIdx.x;
+
+ int batch_idx__key_idx = batch_idx * num_key + key_idx;
+ if (key_mask[batch_idx__key_idx] == 0) {
+ return;
+ }
+
+ int hash_code = key_hash_code[batch_idx__key_idx * num_hash_f + hash_f_idx];
+ atomicAdd(&count_sort_table[(batch_idx * num_hash_f + hash_f_idx) * hashtable_capacity + hash_code], 1);
+
+}
+
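+// Step 2 of the count sort: per (batch, hash function) row, turn the bucket counts into
+// start offsets via a prefix sum in shared memory (the counts are copied in shifted by
+// one slot), using a warp shuffle scan followed by a serial carry across 32-element
+// chunks.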
+__global__ void count_sort_step2_cuda_kernel(
+ int *count_sort_table, // [batch_size, num_hash_f, hashtable_capacity]
+ int batch_size,
+ int num_hash_f,
+ int hashtable_capacity
+) {
+
+ int batch_idx = blockIdx.y;
+ int hash_f_idx = blockIdx.x;
+
+ int num_threads = blockDim.x;
+ int thread_id = threadIdx.x;
+
+ int batch_idx__hash_f_idx = batch_idx * num_hash_f + hash_f_idx;
+
+ extern __shared__ float buffer[];
+ int *table_buffer = (int*)buffer;
+
+ if (thread_id == 0) {
+ table_buffer[0] = 0;
+ }
+ copy_data(&count_sort_table[batch_idx__hash_f_idx * hashtable_capacity], &table_buffer[1], hashtable_capacity - 1, num_threads, thread_id);
+
+ for (int table_idx_start = 0; table_idx_start < hashtable_capacity; table_idx_start = table_idx_start + num_threads) {
+ int thread_value = table_buffer[table_idx_start + thread_id];
+ int next_thread_value = 0;
+ for (int offset = 1; offset < WARP_SIZE; offset = offset << 1) {
+ next_thread_value = __shfl_up_sync(FULL_MASK, thread_value, offset);
+ if (thread_id % WARP_SIZE >= offset) {
+ thread_value = thread_value + next_thread_value;
+ }
+ }
+ table_buffer[table_idx_start + thread_id] = thread_value;
+ }
+ __syncthreads();
+
+ if (hashtable_capacity > WARP_SIZE) {
+ if (thread_id < WARP_SIZE) {
+ for (int table_idx_start = WARP_SIZE; table_idx_start < hashtable_capacity; table_idx_start = table_idx_start + WARP_SIZE) {
+ table_buffer[table_idx_start + thread_id] += table_buffer[table_idx_start - 1];
+ }
+ }
+ }
+
+ copy_data(table_buffer, &count_sort_table[batch_idx__hash_f_idx * hashtable_capacity], hashtable_capacity, num_threads, thread_id);
+
+}
+
+
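+// Step 3: scatter phase of the count sort. Each key atomically bumps its bucket's
+// offset and writes its index into key_sorted_idxes, so keys sharing a hash code end up
+// contiguous for every hash function.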
+__global__ void count_sort_step3_cuda_kernel(
+ int *key_mask, // [batch_size, num_key]
+ int *key_hash_code, // [batch_size, num_key, num_hash_f]
+ int *count_sort_table, // [batch_size, num_hash_f, hashtable_capacity]
+ int *key_sorted_idxes, // [batch_size, num_hash_f, num_key]
+ int batch_size,
+ int num_hash_f,
+ int hashtable_capacity,
+ int num_key
+) {
+
+ int batch_idx = blockIdx.y;
+ int key_idx = blockIdx.x * blockDim.y + threadIdx.y;
+ int hash_f_idx = threadIdx.x;
+
+ int batch_idx__key_idx = batch_idx * num_key + key_idx;
+ if (key_mask[batch_idx__key_idx] == 0) {
+ return;
+ }
+
+ int batch_idx__hash_f_idx = batch_idx * num_hash_f + hash_f_idx;
+
+ int hash_code = key_hash_code[batch_idx__key_idx * num_hash_f + hash_f_idx];
+ int sort_idx = atomicAdd(&count_sort_table[batch_idx__hash_f_idx * hashtable_capacity + hash_code], 1);
+ key_sorted_idxes[batch_idx__hash_f_idx * num_key + sort_idx] = key_idx;
+
+}
+
+__global__ void extract_query_info_cuda_kernel(
+ int *query_mask, // [batch_size, num_query]
+ int *query_hash_code, // [batch_size, num_query, num_hash_f]
+ int *count_sort_table, // [batch_size, num_hash_f, hashtable_capacity]
+ int *query_info, // [batch_size, num_query, 2, num_hash_f]
+ int batch_size,
+ int num_hash_f,
+ int hashtable_capacity,
+ int num_query
+) {
+
+ int batch_idx = blockIdx.y;
+ int query_idx = blockIdx.x * blockDim.y + threadIdx.y;
+ int hash_f_idx = threadIdx.x;
+
+ int batch_idx__query_idx = batch_idx * num_query + query_idx;
+ if (query_mask[batch_idx__query_idx] == 0) {
+ return;
+ }
+
+ int hash_code = query_hash_code[batch_idx__query_idx * num_hash_f + hash_f_idx];
+ int batch_idx__hash_f_idx__hash_code = (batch_idx * num_hash_f + hash_f_idx) * hashtable_capacity + hash_code;
+
+ int key_offset = select(hash_code == 0, 0, count_sort_table[batch_idx__hash_f_idx__hash_code - 1]);
+ int key_count = count_sort_table[batch_idx__hash_f_idx__hash_code] - key_offset;
+
+ query_info[batch_idx__query_idx * 2 * num_hash_f + hash_f_idx] = key_offset;
+ query_info[(batch_idx__query_idx * 2 + 1) * num_hash_f + hash_f_idx] = key_count;
+
+}
+
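+// ver2 step 2: one block per (query, hash function, batch) triple. For every key that
+// collides with the query under this hash function, a warp computes the dot product of
+// the query and key weights via shuffle reduction, scales it by 1/num_hash_f, and
+// atomically accumulates weight * value into the query's output row. The query weights
+// and the current batch of colliding key indices are staged in shared memory.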
+__global__ void lsh_weighted_cumulation_ver2_step2_cuda_kernel(
+ int *query_mask, // [batch_size, num_query]
+ int *query_info, // [batch_size, num_query, 2, num_hash_f]
+ int *key_sorted_idxes, // [batch_size, num_hash_f, num_key]
+ float *query_weight, // [batch_size, num_query, weight_dim]
+ float *key_weight, // [batch_size, num_key, weight_dim]
+ float *value, // [batch_size, num_key, value_dim]
+ float *cumulation_value, // [batch_size, num_query, value_dim]
+ int batch_size,
+ int num_hash_f,
+ int num_query,
+ int num_key,
+ int value_dim,
+ int weight_dim
+) {
+
+ int batch_idx = blockIdx.z;
+ int hash_f_idx = blockIdx.y;
+ int query_idx = blockIdx.x;
+
+ int num_threads = blockDim.y * blockDim.x;
+ int thread_id = threadIdx.y * blockDim.x + threadIdx.x;
+
+ int num_warps = blockDim.y;
+ int warp_idx = threadIdx.y;
+ int warp_thread_idx = threadIdx.x;
+
+ int batch_idx__query_idx = batch_idx * num_query + query_idx;
+ if (query_mask[batch_idx__query_idx] == 0) {
+ return;
+ }
+
+ int key_offset = query_info[batch_idx__query_idx * 2 * num_hash_f + hash_f_idx];
+ int key_count = query_info[(batch_idx__query_idx * 2 + 1) * num_hash_f + hash_f_idx];
+
+ if (key_count == 0) {
+ return;
+ }
+
+ extern __shared__ float buffer[];
+
+ if (key_count == 1) {
+ if (warp_idx == 0) {
+ int key_idx = key_sorted_idxes[(batch_idx * num_hash_f + hash_f_idx) * num_key + key_offset];
+ int batch_idx__key_idx = batch_idx * num_key + key_idx;
+ float weight = 0;
+ for (int weight_offset = 0; weight_offset < weight_dim; weight_offset = weight_offset + WARP_SIZE) {
+ int weight_dim_idx = weight_offset + warp_thread_idx;
+ float val = query_weight[batch_idx__query_idx * weight_dim + weight_dim_idx] * key_weight[batch_idx__key_idx * weight_dim + weight_dim_idx];
+ #pragma unroll
+ for (int offset = 1; offset < WARP_SIZE; offset = offset << 1) {
+ val += __shfl_xor_sync(FULL_MASK, val, offset);
+ }
+ weight = weight + val;
+ }
+ weight = weight / float(num_hash_f);
+ for (int value_offset = 0; value_offset < value_dim; value_offset = value_offset + WARP_SIZE) {
+ int value_dim_idx = value_offset + warp_thread_idx;
+ float val = value[batch_idx__key_idx * value_dim + value_dim_idx];
+ atomicAdd(&cumulation_value[batch_idx__query_idx * value_dim + value_dim_idx], weight * val);
+ }
+ }
+ } else {
+ float *weight_buffer = buffer;
+ int *key_idxes_buffer = (int*)&buffer[weight_dim];
+
+ copy_data_nonblocking(&query_weight[batch_idx__query_idx * weight_dim], weight_buffer, weight_dim, num_threads, thread_id);
+
+ while (key_count > 0) {
+ int work_size = min(WARP_SIZE, key_count);
+ copy_data_nonblocking(&key_sorted_idxes[(batch_idx * num_hash_f + hash_f_idx) * num_key + key_offset], key_idxes_buffer, work_size, num_threads, thread_id);
+ __syncthreads();
+ for (int work_offset = 0; work_offset < WARP_SIZE; work_offset = work_offset + num_warps) {
+ int work_idx = work_offset + warp_idx;
+ if (work_idx < key_count) {
+ int key_idx = key_idxes_buffer[work_idx];
+ int batch_idx__key_idx = batch_idx * num_key + key_idx;
+ float weight = 0;
+ for (int weight_offset = 0; weight_offset < weight_dim; weight_offset = weight_offset + WARP_SIZE) {
+ int weight_dim_idx = weight_offset + warp_thread_idx;
+ float val = weight_buffer[weight_dim_idx] * key_weight[batch_idx__key_idx * weight_dim + weight_dim_idx];
+ #pragma unroll
+ for (int offset = 1; offset < WARP_SIZE; offset = offset << 1) {
+ val += __shfl_xor_sync(FULL_MASK, val, offset);
+ }
+ weight = weight + val;
+ }
+ weight = weight / float(num_hash_f);
+ for (int value_offset = 0; value_offset < value_dim; value_offset = value_offset + WARP_SIZE) {
+ int value_dim_idx = value_offset + warp_thread_idx;
+ float val = value[batch_idx__key_idx * value_dim + value_dim_idx];
+ atomicAdd(&cumulation_value[batch_idx__query_idx * value_dim + value_dim_idx], weight * val);
+ }
+ }
+ }
+ key_count = key_count - work_size;
+ key_offset = key_offset + work_size;
+ }
+ }
+
+}
+
+__global__ void lsh_weighted_cumulation_ver3_step2_cuda_kernel(
+ int *query_sorted_idxes, // [batch_size, num_hash_f, num_query]
+ int *key_mask, // [batch_size, num_key]
+ int *key_info, // [batch_size, num_key, 2, num_hash_f]
+ float *query_weight, // [batch_size, num_query, weight_dim]
+ float *key_weight, // [batch_size, num_key, weight_dim]
+ float *value, // [batch_size, num_key, value_dim]
+ float *cumulation_value, // [batch_size, num_query, value_dim]
+ int batch_size,
+ int num_hash_f,
+ int num_query,
+ int num_key,
+ int value_dim,
+ int weight_dim
+) {
+
+ int batch_idx = blockIdx.z;
+ int hash_f_idx = blockIdx.y;
+ int key_idx = blockIdx.x;
+
+ int num_threads = blockDim.y * blockDim.x;
+ int thread_id = threadIdx.y * blockDim.x + threadIdx.x;
+
+ int num_warps = blockDim.y;
+ int warp_idx = threadIdx.y;
+ int warp_thread_idx = threadIdx.x;
+
+ int batch_idx__key_idx = batch_idx * num_key + key_idx;
+ if (key_mask[batch_idx__key_idx] == 0) {
+ return;
+ }
+
+ int query_offset = key_info[batch_idx__key_idx * 2 * num_hash_f + hash_f_idx];
+ int query_count = key_info[(batch_idx__key_idx * 2 + 1) * num_hash_f + hash_f_idx];
+
+ if (query_count == 0) {
+ return;
+ }
+
+ extern __shared__ float buffer[];
+
+ if (query_count == 1) {
+ if (warp_idx == 0) {
+ int query_idx = query_sorted_idxes[(batch_idx * num_hash_f + hash_f_idx) * num_query + query_offset];
+ int batch_idx__query_idx = batch_idx * num_query + query_idx;
+ float weight = 0;
+ for (int weight_offset = 0; weight_offset < weight_dim; weight_offset = weight_offset + WARP_SIZE) {
+ int weight_dim_idx = weight_offset + warp_thread_idx;
+ float val = key_weight[batch_idx__key_idx * weight_dim + weight_dim_idx] * query_weight[batch_idx__query_idx * weight_dim + weight_dim_idx];
+ #pragma unroll
+ for (int offset = 1; offset < WARP_SIZE; offset = offset << 1) {
+ val += __shfl_xor_sync(FULL_MASK, val, offset);
+ }
+ weight = weight + val;
+ }
+ weight = weight / float(num_hash_f);
+ for (int value_offset = 0; value_offset < value_dim; value_offset = value_offset + WARP_SIZE) {
+ int value_dim_idx = value_offset + warp_thread_idx;
+ float val = value[batch_idx__key_idx * value_dim + value_dim_idx];
+ atomicAdd(&cumulation_value[batch_idx__query_idx * value_dim + value_dim_idx], weight * val);
+ }
+ }
+ } else {
+ float *weight_buffer = buffer;
+ float *value_buffer = &buffer[weight_dim];
+ int *query_idxes_buffer = (int*)&buffer[weight_dim + value_dim];
+
+ copy_data_nonblocking(&key_weight[batch_idx__key_idx * weight_dim], weight_buffer, weight_dim, num_threads, thread_id);
+ copy_data_nonblocking(&value[batch_idx__key_idx * value_dim], value_buffer, value_dim, num_threads, thread_id);
+
+ while (query_count > 0) {
+ int work_size = min(WARP_SIZE, query_count);
+ copy_data_nonblocking(&query_sorted_idxes[(batch_idx * num_hash_f + hash_f_idx) * num_query + query_offset], query_idxes_buffer, work_size, num_threads, thread_id);
+ __syncthreads();
+ for (int work_offset = 0; work_offset < WARP_SIZE; work_offset = work_offset + num_warps) {
+ int work_idx = work_offset + warp_idx;
+ if (work_idx < query_count) {
+ int query_idx = query_idxes_buffer[work_idx];
+ int batch_idx__query_idx = batch_idx * num_query + query_idx;
+ float weight = 0;
+ for (int weight_offset = 0; weight_offset < weight_dim; weight_offset = weight_offset + WARP_SIZE) {
+ int weight_dim_idx = weight_offset + warp_thread_idx;
+ float val = weight_buffer[weight_dim_idx] * query_weight[batch_idx__query_idx * weight_dim + weight_dim_idx];
+ #pragma unroll
+ for (int offset = 1; offset < WARP_SIZE; offset = offset << 1) {
+ val += __shfl_xor_sync(FULL_MASK, val, offset);
+ }
+ weight = weight + val;
+ }
+ weight = weight / float(num_hash_f);
+ for (int value_offset = 0; value_offset < value_dim; value_offset = value_offset + WARP_SIZE) {
+ int value_dim_idx = value_offset + warp_thread_idx;
+ float val = value_buffer[value_dim_idx];
+ atomicAdd(&cumulation_value[batch_idx__query_idx * value_dim + value_dim_idx], weight * val);
+ }
+ }
+ }
+ query_count = query_count - work_size;
+ query_offset = query_offset + work_size;
+ }
+ }
+
+}
+
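+// ver4 step 2: like ver3 this iterates over keys rather than queries, but a single block
+// now handles one key across all hash functions. A shared-memory hash set
+// (hashtable_query) deduplicates the colliding query indices while keeping a collision
+// count per query, so the per-query accumulation work is not repeated for every hash
+// function under which the same (key, query) pair collides.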
+__global__ void lsh_weighted_cumulation_ver4_step2_cuda_kernel(
+ int *query_sorted_idxes, // [batch_size, num_hash_f, num_query]
+ int *key_mask, // [batch_size, num_key]
+ int *key_info, // [batch_size, num_key, 2, num_hash_f]
+ float *query_weight, // [batch_size, num_query, weight_dim]
+ float *key_weight, // [batch_size, num_key, weight_dim]
+ float *value, // [batch_size, num_key, value_dim]
+ float *cumulation_value, // [batch_size, num_query, value_dim]
+ int batch_size,
+ int num_hash_f,
+ int num_query,
+ int num_key,
+ int value_dim,
+ int weight_dim
+) {
+
+ int batch_idx = blockIdx.y;
+ int key_idx = blockIdx.x;
+
+ int num_threads = blockDim.y * blockDim.x;
+ int thread_id = threadIdx.y * blockDim.x + threadIdx.x;
+
+ int num_warps = blockDim.y;
+ int warp_idx = threadIdx.y;
+ int warp_thread_idx = threadIdx.x;
+
+ int batch_idx__key_idx = batch_idx * num_key + key_idx;
+ if (key_mask[batch_idx__key_idx] == 0) {
+ return;
+ }
+
+ extern __shared__ float buffer[];
+ float *weight_buffer = buffer;
+ float *value_buffer = &buffer[weight_dim];
+ int *key_info_buffer = (int*)&buffer[weight_dim + value_dim];
+
+ copy_data_nonblocking(&key_weight[batch_idx__key_idx * weight_dim], weight_buffer, weight_dim, num_threads, thread_id);
+ copy_data_nonblocking(&value[batch_idx__key_idx * value_dim], value_buffer, value_dim, num_threads, thread_id);
+ copy_data_nonblocking(&key_info[batch_idx__key_idx * 2 * num_hash_f], key_info_buffer, 2 * num_hash_f, num_threads, thread_id);
+
+ int *query_offset_buffer = key_info_buffer;
+ int *query_count_buffer = &key_info_buffer[num_hash_f];
+
+ const int hashtable_size = 1024 + OPTIMAL_THREADS_PER_BLOCK;
+ __shared__ int hashtable_query[hashtable_size];
+ __shared__ int hashtable_count[hashtable_size];
+ __shared__ int inserted_query[hashtable_size];
+ __shared__ int query_counter[1];
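+ // shared-memory open-addressing set: hashtable_query/hashtable_count deduplicate query indices and count how many times each one is encountered (used below as duplicate_count); inserted_query lists every distinct query once and query_counter tracks the length of that list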
+
+ int hash_f_idx_base = 0;
+
+ while (true) {
+
+ init_buffer_nonblocking(EMPTY_VALUE, hashtable_query, hashtable_size, num_threads, thread_id);
+ init_buffer_nonblocking(0, hashtable_count, hashtable_size, num_threads, thread_id);
+ init_buffer_nonblocking(EMPTY_VALUE, inserted_query, hashtable_size, num_threads, thread_id);
+ init_buffer_nonblocking(0, query_counter, 1, num_threads, thread_id);
+ __syncthreads();
+
+ while (hash_f_idx_base < num_hash_f) {
+
+ int hash_f_idx = hash_f_idx_base + warp_idx;
+ int batch_idx__hash_f_idx = batch_idx * num_hash_f + hash_f_idx;
+
+ int stop_flag = 0;
+
+ int query_offset = query_offset_buffer[hash_f_idx];
+ int query_count = query_count_buffer[hash_f_idx];
+
+ while (query_count > 0) {
+
+ int work_size = min(query_count, WARP_SIZE);
+
+ // try inserting each query into the set and check whether it is new
+ int found_new_query = 0;
+ int query_idx = -1;
+ if (warp_thread_idx < work_size) {
+ query_idx = query_sorted_idxes[batch_idx__hash_f_idx * num_query + query_offset + warp_thread_idx];
+ int slot = set_insert(hashtable_query, hashtable_size, query_idx);
+ if (slot >= 0) {
+ found_new_query = atomicAdd(&hashtable_count[slot], 1) == 0;
+ }
+ }
+
+ // compute cumulative offset
+ int position_offset = found_new_query;
+ int next_position_offset = 0;
+ #pragma unroll
+ for (int offset = 1; offset < WARP_SIZE; offset = offset << 1) {
+ next_position_offset = __shfl_up_sync(FULL_MASK, position_offset, offset);
+ if (thread_id % WARP_SIZE >= offset) {
+ position_offset = position_offset + next_position_offset;
+ }
+ }
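+ // position_offset now holds the warp-inclusive prefix sum of found_new_query, i.e. each lane's 1-based rank among the newly found queries in this warp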
+
+ // get the inserted query list end index
+ int inserted_query_base = 0;
+ if (thread_id % WARP_SIZE == WARP_SIZE - 1) {
+ inserted_query_base = atomicAdd(query_counter, position_offset);
+ }
+ inserted_query_base = __shfl_sync(FULL_MASK, inserted_query_base, WARP_SIZE - 1);
+
+ // insert new queries to list
+ int insert_idx = inserted_query_base + position_offset - 1;
+ if (found_new_query) {
+ inserted_query[insert_idx] = query_idx;
+ }
+
+ // remove inserted queries from list
+ query_offset_buffer[hash_f_idx] += work_size;
+ query_count_buffer[hash_f_idx] -= work_size;
+ query_offset += work_size;
+ query_count -= work_size;
+
+ // if list is almost full, stop inserting
+ if (inserted_query_base + OPTIMAL_THREADS_PER_BLOCK > hashtable_size) {
+ stop_flag = 1;
+ break;
+ }
+
+ }
+
+ if (stop_flag) {
+ break;
+ }
+
+ hash_f_idx_base = hash_f_idx_base + num_warps;
+
+ }
+
+ __syncthreads();
+
+ int num_distinct_query = query_counter[0];
+
+ if (num_distinct_query > 0) {
+ for (int idx_base = 0; idx_base < num_distinct_query; idx_base = idx_base + num_warps) {
+ int idx = idx_base + warp_idx;
+ if (idx < num_distinct_query) {
+ int query_idx = inserted_query[idx];
+ int batch_idx__query_idx = batch_idx * num_query + query_idx;
+
+ int slot = set_lookup(hashtable_query, hashtable_size, query_idx);
+ int duplicate_count = hashtable_count[slot];
+
+ float weight = 0;
+ for (int weight_idx_base = 0; weight_idx_base < weight_dim; weight_idx_base = weight_idx_base + WARP_SIZE) {
+ int weight_dim_idx = weight_idx_base + warp_thread_idx;
+ float val = weight_buffer[weight_dim_idx] * query_weight[batch_idx__query_idx * weight_dim + weight_dim_idx];
+ #pragma unroll
+ for (int offset = 1; offset < WARP_SIZE; offset = offset << 1) {
+ val += __shfl_xor_sync(FULL_MASK, val, offset);
+ }
+ weight = weight + val;
+ }
+
+ weight = (float)duplicate_count * weight / float(num_hash_f);
+
+ for (int value_idx_base = 0; value_idx_base < value_dim; value_idx_base = value_idx_base + WARP_SIZE) {
+ int value_dim_idx = value_idx_base + warp_thread_idx;
+ float val = value_buffer[value_dim_idx];
+ atomicAdd(&cumulation_value[batch_idx__query_idx * value_dim + value_dim_idx], weight * val);
+ }
+ }
+ }
+ } else {
+
+ // all computation is completed if num_distinct_query == 0
+ break;
+
+ }
+
+ __syncthreads();
+
+ }
+
+}
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/kernels/yoso/fast_lsh_cumulation_cuda.h b/llmeval-env/lib/python3.10/site-packages/transformers/kernels/yoso/fast_lsh_cumulation_cuda.h
new file mode 100644
index 0000000000000000000000000000000000000000..b2adc0f735358d0fcb6a056e7d19ba745977e129
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/kernels/yoso/fast_lsh_cumulation_cuda.h
@@ -0,0 +1,157 @@
+__global__ void fast_hash_ver1_cuda_kernel(
+ int *mask, // [batch_size, num_vector]
+ float *vector, // [batch_size, num_vector, vector_dim]
+ int *Dmat, // [3, num_part, vector_dim]
+ int *hash_code, // [batch_size, num_vector, num_hash_f]
+ int batch_size,
+ int num_vector,
+ int vector_dim,
+ int num_part,
+ int num_hash_f,
+ int hash_code_len
+);
+
+__global__ void lsh_cumulation_ver1_step1_cuda_kernel(
+ int *key_mask, // [batch_size, num_key]
+ int *key_hash_code, // [batch_size, num_key, num_hash_f]
+ float *value, // [batch_size, num_key, value_dim]
+ float *hashtable_value, // [batch_size, num_hash_f, hashtable_capacity, value_dim]
+ int batch_size,
+ int num_hash_f,
+ int hashtable_capacity,
+ int num_key,
+ int value_dim,
+ int offset_warp
+);
+
+__global__ void lsh_cumulation_ver1_step2_cuda_kernel(
+ int *query_mask, // [batch_size, num_query]
+ int *query_hash_code, // [batch_size, num_query, num_hash_f]
+ float *hashtable_value, // [batch_size, num_hash_f, hashtable_capacity, value_dim]
+ float *cumulation_value, // [batch_size, num_query, value_dim]
+ int batch_size,
+ int num_hash_f,
+ int hashtable_capacity,
+ int num_query,
+ int value_dim,
+ int offset_warp
+);
+
+__global__ void lsh_weighted_cumulation_ver1_step1_cuda_kernel(
+ int *key_mask, // [batch_size, num_key]
+ int *key_hash_code, // [batch_size, num_key, num_hash_f]
+ float *key_weight, // [batch_size, num_key, weight_dim]
+ float *value, // [batch_size, num_key, value_dim]
+ float *hashtable_value, // [batch_size, num_hash_f, hashtable_capacity, WARP_SIZE]
+ int batch_size,
+ int num_hash_f,
+ int hashtable_capacity,
+ int num_key,
+ int value_dim,
+ int weight_dim,
+ int offset_warp,
+ int weight_idx
+);
+
+__global__ void lsh_weighted_cumulation_ver1_step2_cuda_kernel(
+ int *query_mask, // [batch_size, num_query]
+ int *query_hash_code, // [batch_size, num_query, num_hash_f]
+ float *query_weight, // [batch_size, num_query, weight_dim]
+ float *hashtable_value, // [batch_size, num_hash_f, hashtable_capacity, WARP_SIZE]
+ float *cumulation_value, // [batch_size, num_query, value_dim]
+ int batch_size,
+ int num_hash_f,
+ int hashtable_capacity,
+ int num_query,
+ int value_dim,
+ int weight_dim,
+ int offset_warp,
+ int weight_idx
+);
+
+__global__ void count_sort_step1_cuda_kernel(
+ int *key_mask, // [batch_size, num_key]
+ int *key_hash_code, // [batch_size, num_key, num_hash_f]
+ int *count_sort_table, // [batch_size, num_hash_f, hashtable_capacity]
+ int batch_size,
+ int num_hash_f,
+ int hashtable_capacity,
+ int num_key
+);
+
+__global__ void count_sort_step2_cuda_kernel(
+ int *count_sort_table, // [batch_size, num_hash_f, hashtable_capacity]
+ int batch_size,
+ int num_hash_f,
+ int hashtable_capacity
+);
+
+__global__ void count_sort_step3_cuda_kernel(
+ int *key_mask, // [batch_size, num_key]
+ int *key_hash_code, // [batch_size, num_key, num_hash_f]
+ int *count_sort_table, // [batch_size, num_hash_f, hashtable_capacity]
+ int *key_sorted_idxes, // [batch_size, num_hash_f, num_key]
+ int batch_size,
+ int num_hash_f,
+ int hashtable_capacity,
+ int num_key
+);
+
+__global__ void extract_query_info_cuda_kernel(
+ int *query_mask, // [batch_size, num_query]
+ int *query_hash_code, // [batch_size, num_query, num_hash_f]
+ int *count_sort_table, // [batch_size, num_hash_f, hashtable_capacity]
+ int *query_info, // [batch_size, num_query, 2, num_hash_f]
+ int batch_size,
+ int num_hash_f,
+ int hashtable_capacity,
+ int num_query
+);
+
+__global__ void lsh_weighted_cumulation_ver2_step2_cuda_kernel(
+ int *query_mask, // [batch_size, num_query]
+ int *query_info, // [batch_size, num_query, 2, num_hash_f]
+ int *key_sorted_idxes, // [batch_size, num_hash_f, num_key]
+ float *query_weight, // [batch_size, num_query, weight_dim]
+ float *key_weight, // [batch_size, num_key, weight_dim]
+ float *value, // [batch_size, num_key, value_dim]
+ float *cumulation_value, // [batch_size, num_query, value_dim]
+ int batch_size,
+ int num_hash_f,
+ int num_query,
+ int num_key,
+ int value_dim,
+ int weight_dim
+);
+
+__global__ void lsh_weighted_cumulation_ver3_step2_cuda_kernel(
+ int *query_sorted_idxes, // [batch_size, num_hash_f, num_query]
+ int *key_mask, // [batch_size, num_key]
+ int *key_info, // [batch_size, num_key, 2, num_hash_f]
+ float *query_weight, // [batch_size, num_query, weight_dim]
+ float *key_weight, // [batch_size, num_key, weight_dim]
+ float *value, // [batch_size, num_key, value_dim]
+ float *cumulation_value, // [batch_size, num_query, value_dim]
+ int batch_size,
+ int num_hash_f,
+ int num_query,
+ int num_key,
+ int value_dim,
+ int weight_dim
+);
+
+__global__ void lsh_weighted_cumulation_ver4_step2_cuda_kernel(
+ int *query_sorted_idxes, // [batch_size, num_hash_f, num_query]
+ int *key_mask, // [batch_size, num_key]
+ int *key_info, // [batch_size, num_key, 2, num_hash_f]
+ float *query_weight, // [batch_size, num_query, weight_dim]
+ float *key_weight, // [batch_size, num_key, weight_dim]
+ float *value, // [batch_size, num_key, value_dim]
+ float *cumulation_value, // [batch_size, num_query, value_dim]
+ int batch_size,
+ int num_hash_f,
+ int num_query,
+ int num_key,
+ int value_dim,
+ int weight_dim
+);
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/kernels/yoso/fast_lsh_cumulation_torch.cpp b/llmeval-env/lib/python3.10/site-packages/transformers/kernels/yoso/fast_lsh_cumulation_torch.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..e150a2be604b28f600ab345a8cc9e97819cca416
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/kernels/yoso/fast_lsh_cumulation_torch.cpp
@@ -0,0 +1,128 @@
+#include <torch/extension.h>
+#include <ATen/ATen.h>
+#include "fast_lsh_cumulation.h"
+#include "common_cuda.h"
+#include <vector>
+
+std::vector<at::Tensor> fast_hash(
+ at::Tensor query_mask,
+ at::Tensor query_vector,
+ at::Tensor key_mask,
+ at::Tensor key_vector,
+ int num_hash_f,
+ int hash_code_len,
+ bool use_cuda,
+ int version
+) {
+ return fast_hash_ver1_kernel(
+ query_mask,
+ query_vector,
+ key_mask,
+ key_vector,
+ num_hash_f,
+ hash_code_len,
+ use_cuda
+ );
+}
+
+at::Tensor lsh_cumulation(
+ at::Tensor query_mask, // [batch_size, num_query]
+ at::Tensor query_hash_code, // [batch_size, num_query, num_hash_f]
+ at::Tensor key_mask, // [batch_size, num_key]
+ at::Tensor key_hash_code, // [batch_size, num_key, num_hash_f]
+ at::Tensor value, // [batch_size, num_key, value_dim]
+ int hashtable_capacity,
+ bool use_cuda,
+ int version
+) {
+ return lsh_cumulation_ver1_kernel(
+ query_mask,
+ query_hash_code,
+ key_mask,
+ key_hash_code,
+ value,
+ hashtable_capacity,
+ use_cuda
+ );
+}
+
+at::Tensor lsh_weighted_cumulation(
+ at::Tensor query_mask, // [batch_size, num_query]
+ at::Tensor query_hash_code, // [batch_size, num_query, num_hash_f]
+ at::Tensor query_weight, // [batch_size, num_query, weight_dim]
+ at::Tensor key_mask, // [batch_size, num_key]
+ at::Tensor key_hash_code, // [batch_size, num_key, num_hash_f]
+ at::Tensor key_weight, // [batch_size, num_key, weight_dim]
+ at::Tensor value, // [batch_size, num_key, value_dim]
+ int hashtable_capacity,
+ bool use_cuda,
+ int version
+) {
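+ // dispatch on `version`; any value other than 1, 2 or 4 falls back to the ver3 kernel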
+ if (version == 1) {
+ return lsh_weighted_cumulation_ver1_kernel(
+ query_mask,
+ query_hash_code,
+ query_weight,
+ key_mask,
+ key_hash_code,
+ key_weight,
+ value,
+ hashtable_capacity,
+ use_cuda
+ );
+ } else if (version == 2) {
+ return lsh_weighted_cumulation_ver2_kernel(
+ query_mask,
+ query_hash_code,
+ query_weight,
+ key_mask,
+ key_hash_code,
+ key_weight,
+ value,
+ hashtable_capacity,
+ use_cuda
+ );
+ } else if (version == 3) {
+ return lsh_weighted_cumulation_ver3_kernel(
+ query_mask,
+ query_hash_code,
+ query_weight,
+ key_mask,
+ key_hash_code,
+ key_weight,
+ value,
+ hashtable_capacity,
+ use_cuda
+ );
+ } else if (version == 4) {
+ return lsh_weighted_cumulation_ver4_kernel(
+ query_mask,
+ query_hash_code,
+ query_weight,
+ key_mask,
+ key_hash_code,
+ key_weight,
+ value,
+ hashtable_capacity,
+ use_cuda
+ );
+ } else {
+ return lsh_weighted_cumulation_ver3_kernel(
+ query_mask,
+ query_hash_code,
+ query_weight,
+ key_mask,
+ key_hash_code,
+ key_weight,
+ value,
+ hashtable_capacity,
+ use_cuda
+ );
+ }
+}
+
+PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
+ m.def("fast_hash", &fast_hash, "Fast Hash (CUDA)");
+ m.def("lsh_cumulation", &lsh_cumulation, "LSH Cumulation (CUDA)");
+ m.def("lsh_weighted_cumulation", &lsh_weighted_cumulation, "LSH Weighted Cumulation (CUDA)");
+}
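+
+// Usage sketch from Python, assuming the extension is compiled under the module name
+// `fast_lsh_cumulation` (the build name is chosen by the caller, e.g. via torch.utils.cpp_extension.load)
+// and that fast_hash returns the query and key hash codes in that order:
+//   import fast_lsh_cumulation as ext
+//   query_code, key_code = ext.fast_hash(q_mask, q_vec, k_mask, k_vec, num_hash_f, hash_code_len, True, 1)
+//   out = ext.lsh_weighted_cumulation(q_mask, query_code, q_weight, k_mask, key_code, k_weight,
+//                                     value, hashtable_capacity, True, 4)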
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/git/__init__.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/git/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e234a4b01db188e83c4e21ba000d24f60b13b286
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/git/__init__.py
@@ -0,0 +1,60 @@
+# Copyright 2022 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import TYPE_CHECKING
+
+from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
+
+
+_import_structure = {
+ "configuration_git": ["GIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GitConfig", "GitVisionConfig"],
+ "processing_git": ["GitProcessor"],
+}
+
+try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_git"] = [
+ "GIT_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "GitForCausalLM",
+ "GitModel",
+ "GitPreTrainedModel",
+ "GitVisionModel",
+ ]
+
+if TYPE_CHECKING:
+ from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
+ from .processing_git import GitProcessor
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_git import (
+ GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
+ GitForCausalLM,
+ GitModel,
+ GitPreTrainedModel,
+ GitVisionModel,
+ )
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/git/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/git/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..93c799ccd9ce7563915e7cf6656612df11b20e56
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/git/__pycache__/__init__.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/git/__pycache__/configuration_git.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/git/__pycache__/configuration_git.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3334e34075e7a2c58a842d3b6ad9a986286569ff
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/git/__pycache__/configuration_git.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/git/__pycache__/convert_git_to_pytorch.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/git/__pycache__/convert_git_to_pytorch.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..caa60cd7b17f16a509c54edf1831df2c1df0f840
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/git/__pycache__/convert_git_to_pytorch.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/git/__pycache__/modeling_git.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/git/__pycache__/modeling_git.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..700208bb8a7db6a868dcefc2260ac97e8ae72b33
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/git/__pycache__/modeling_git.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/git/__pycache__/processing_git.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/models/git/__pycache__/processing_git.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..bb8ae08edd6fc7a77f5f7bb8d2eab03784f49c3e
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/models/git/__pycache__/processing_git.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/git/configuration_git.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/git/configuration_git.py
new file mode 100644
index 0000000000000000000000000000000000000000..0c28bbabff6b0b15d27644599fc9692e19aa1d9e
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/git/configuration_git.py
@@ -0,0 +1,240 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+from typing import Union
+
+from ...configuration_utils import PretrainedConfig
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+
+from ..deprecated._archive_maps import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
+
+
+class GitVisionConfig(PretrainedConfig):
+ r"""
+ This is the configuration class to store the configuration of a [`GitVisionModel`]. It is used to instantiate a GIT
+ vision encoder according to the specified arguments, defining the model architecture. Instantiating a configuration
+ with the defaults will yield a similar configuration to that of the vision encoder of the GIT
+ [microsoft/git-base](https://huggingface.co/microsoft/git-base) architecture.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+ Args:
+ hidden_size (`int`, *optional*, defaults to 768):
+ Dimensionality of the encoder layers and the pooler layer.
+ intermediate_size (`int`, *optional*, defaults to 3072):
+ Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
+ num_hidden_layers (`int`, *optional*, defaults to 12):
+ Number of hidden layers in the Transformer encoder.
+ num_attention_heads (`int`, *optional*, defaults to 12):
+ Number of attention heads for each attention layer in the Transformer encoder.
+ num_channels (`int`, *optional*, defaults to 3):
+ The number of channels in the input images.
+ image_size (`int`, *optional*, defaults to 224):
+ The size (resolution) of each image.
+ patch_size (`int`, *optional*, defaults to 16):
+ The size (resolution) of each patch.
+ hidden_act (`str` or `function`, *optional*, defaults to `"quick_gelu"`):
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+ `"relu"`, `"selu"` and `"gelu_new"` ``"quick_gelu"` are supported.
+ layer_norm_eps (`float`, *optional*, defaults to 1e-5):
+ The epsilon used by the layer normalization layers.
+ attention_dropout (`float`, *optional*, defaults to 0.0):
+ The dropout ratio for the attention probabilities.
+ initializer_range (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+
+ Example:
+
+ ```python
+ >>> from transformers import GitVisionConfig, GitVisionModel
+
+ >>> # Initializing a GitVisionConfig with microsoft/git-base style configuration
+ >>> configuration = GitVisionConfig()
+
+ >>> # Initializing a GitVisionModel (with random weights) from the microsoft/git-base style configuration
+ >>> model = GitVisionModel(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```"""
+
+ model_type = "git_vision_model"
+
+ def __init__(
+ self,
+ hidden_size=768,
+ intermediate_size=3072,
+ num_hidden_layers=12,
+ num_attention_heads=12,
+ num_channels=3,
+ image_size=224,
+ patch_size=16,
+ hidden_act="quick_gelu",
+ layer_norm_eps=1e-5,
+ attention_dropout=0.0,
+ initializer_range=0.02,
+ **kwargs,
+ ):
+ super().__init__(**kwargs)
+
+ self.hidden_size = hidden_size
+ self.intermediate_size = intermediate_size
+ self.num_hidden_layers = num_hidden_layers
+ self.num_attention_heads = num_attention_heads
+ self.num_channels = num_channels
+ self.patch_size = patch_size
+ self.image_size = image_size
+ self.initializer_range = initializer_range
+ self.attention_dropout = attention_dropout
+ self.layer_norm_eps = layer_norm_eps
+ self.hidden_act = hidden_act
+
+ @classmethod
+ def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
+ cls._set_token_in_kwargs(kwargs)
+
+ config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
+
+ # get the vision config dict if we are loading from GITConfig
+ if config_dict.get("model_type") == "git":
+ config_dict = config_dict["vision_config"]
+
+ if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
+ logger.warning(
+ f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
+ f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
+ )
+
+ return cls.from_dict(config_dict, **kwargs)
+
+
+class GitConfig(PretrainedConfig):
+ r"""
+ This is the configuration class to store the configuration of a [`GitModel`]. It is used to instantiate a GIT model
+ according to the specified arguments, defining the model architecture. Instantiating a configuration with the
+ defaults will yield a similar configuration to that of the GIT
+ [microsoft/git-base](https://huggingface.co/microsoft/git-base) architecture.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+ Args:
+ vision_config (`dict`, *optional*):
+ Dictionary of configuration options used to initialize [`GitVisionConfig`].
+ vocab_size (`int`, *optional*, defaults to 30522):
+ Vocabulary size of the GIT model. Defines the number of different tokens that can be represented by the
+ `inputs_ids` passed when calling [`GitModel`].
+ hidden_size (`int`, *optional*, defaults to 768):
+ Dimensionality of the encoder layers and the pooler layer.
+ num_hidden_layers (`int`, *optional*, defaults to 6):
+ Number of hidden layers in the Transformer encoder.
+ num_attention_heads (`int`, *optional*, defaults to 12):
+ Number of attention heads for each attention layer in the Transformer encoder.
+ intermediate_size (`int`, *optional*, defaults to 3072):
+ Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
+ hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+ `"relu"`, `"silu"` and `"gelu_new"` are supported.
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
+ The dropout ratio for the attention probabilities.
+ max_position_embeddings (`int`, *optional*, defaults to 1024):
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
+ just in case (e.g., 512 or 1024 or 2048).
+ initializer_range (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+ layer_norm_eps (`float`, *optional*, defaults to 1e-12):
+ The epsilon used by the layer normalization layers.
+ position_embedding_type (`str`, *optional*, defaults to `"absolute"`):
+ Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For
+ positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to
+ [Self-Attention with Relative Position Representations (Shaw et al.)](https://arxiv.org/abs/1803.02155).
+ For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models
+ with Better Relative Position Embeddings (Huang et al.)](https://arxiv.org/abs/2009.13658).
+ use_cache (`bool`, *optional*, defaults to `True`):
+ Whether or not the model should return the last key/values attentions (not used by all models).
+ num_image_with_embedding (`int`, *optional*):
+ The number of temporal embeddings to add, in case the model is used for video captioning/VQA.
+
+ Examples:
+
+ ```python
+ >>> from transformers import GitConfig, GitModel
+
+ >>> # Initializing a GIT microsoft/git-base style configuration
+ >>> configuration = GitConfig()
+
+ >>> # Initializing a model (with random weights) from the microsoft/git-base style configuration
+ >>> model = GitModel(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```"""
+
+ model_type = "git"
+
+ def __init__(
+ self,
+ vision_config=None,
+ vocab_size=30522,
+ hidden_size=768,
+ num_hidden_layers=6,
+ num_attention_heads=12,
+ intermediate_size=3072,
+ hidden_act="gelu",
+ hidden_dropout_prob=0.1,
+ attention_probs_dropout_prob=0.1,
+ max_position_embeddings=1024,
+ initializer_range=0.02,
+ layer_norm_eps=1e-12,
+ pad_token_id=0,
+ position_embedding_type="absolute",
+ use_cache=True,
+ tie_word_embeddings=False,
+ bos_token_id=101,
+ eos_token_id=102,
+ num_image_with_embedding=None,
+ **kwargs,
+ ):
+ super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, **kwargs)
+
+ if vision_config is None:
+ vision_config = {}
+ logger.info("vision_config is None. initializing the GitVisionConfig with default values.")
+
+ self.vision_config = GitVisionConfig(**vision_config)
+ self.vocab_size = vocab_size
+ self.hidden_size = hidden_size
+ self.num_hidden_layers = num_hidden_layers
+ self.num_attention_heads = num_attention_heads
+ self.hidden_act = hidden_act
+ self.intermediate_size = intermediate_size
+ self.hidden_dropout_prob = hidden_dropout_prob
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
+ self.max_position_embeddings = max_position_embeddings
+ self.initializer_range = initializer_range
+ self.layer_norm_eps = layer_norm_eps
+ self.position_embedding_type = position_embedding_type
+ self.use_cache = use_cache
+ self.tie_word_embeddings = tie_word_embeddings
+ self.num_image_with_embedding = num_image_with_embedding
+
+ self.bos_token_id = bos_token_id
+ self.eos_token_id = eos_token_id
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/git/convert_git_to_pytorch.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/git/convert_git_to_pytorch.py
new file mode 100644
index 0000000000000000000000000000000000000000..4e3e8e7b317905f9108acdd9e3b5b97c1445b24b
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/git/convert_git_to_pytorch.py
@@ -0,0 +1,428 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Convert GIT checkpoints from the original repository.
+
+URL: https://github.com/microsoft/GenerativeImage2Text/tree/main"""
+
+
+import argparse
+from pathlib import Path
+
+import numpy as np
+import requests
+import torch
+from huggingface_hub import hf_hub_download
+from PIL import Image
+from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
+
+from transformers import (
+ AutoTokenizer,
+ CLIPImageProcessor,
+ GitConfig,
+ GitForCausalLM,
+ GitProcessor,
+ GitVisionConfig,
+ VideoMAEImageProcessor,
+)
+from transformers.utils import logging
+
+
+logging.set_verbosity_info()
+logger = logging.get_logger(__name__)
+
+
+def get_git_config(model_name):
+ if "base" in model_name and "vqa" in model_name:
+ image_size = 480
+ elif "large" in model_name and "vqa" in model_name:
+ image_size = 420
+ else:
+ image_size = 224
+
+ vision_config = GitVisionConfig(image_size=image_size)
+
+ if "large" in model_name:
+ vision_config.patch_size = 14
+ vision_config.hidden_size = 1024
+ vision_config.intermediate_size = 4096
+ vision_config.num_hidden_layers = 24
+ vision_config.num_attention_heads = 16
+
+ is_video = "vatex" in model_name or "msrvtt" in model_name
+ num_image_with_embedding = 6 if is_video else None
+ config = GitConfig(vision_config=vision_config.to_dict(), num_image_with_embedding=num_image_with_embedding)
+
+ return config, image_size, is_video
+
+
+# here we list all keys to be renamed (original name on the left, our name on the right)
+def create_rename_keys(config, prefix=""):
+ rename_keys = []
+
+ # image encoder
+ # fmt: off
+ rename_keys.append(
+ (f"{prefix}image_encoder.class_embedding", "git.image_encoder.vision_model.embeddings.class_embedding")
+ )
+ rename_keys.append(
+ (
+ f"{prefix}image_encoder.positional_embedding",
+ "git.image_encoder.vision_model.embeddings.position_embedding.weight",
+ )
+ )
+ rename_keys.append(
+ (f"{prefix}image_encoder.conv1.weight", "git.image_encoder.vision_model.embeddings.patch_embedding.weight")
+ )
+ rename_keys.append((f"{prefix}image_encoder.ln_pre.weight", "git.image_encoder.vision_model.pre_layrnorm.weight"))
+ rename_keys.append((f"{prefix}image_encoder.ln_pre.bias", "git.image_encoder.vision_model.pre_layrnorm.bias"))
+ rename_keys.append(
+ (f"{prefix}image_encoder.ln_post.weight", "git.image_encoder.vision_model.post_layernorm.weight")
+ )
+ rename_keys.append((f"{prefix}image_encoder.ln_post.bias", "git.image_encoder.vision_model.post_layernorm.bias"))
+ # fmt: on
+ rename_keys.append((f"{prefix}image_encoder.proj", "git.image_encoder.visual_projection.weight"))
+
+ # fmt: off
+ for i in range(config.vision_config.num_hidden_layers):
+ # image encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
+ rename_keys.append((f"{prefix}image_encoder.transformer.resblocks.{i}.attn.out_proj.weight", f"git.image_encoder.vision_model.encoder.layers.{i}.self_attn.out_proj.weight"))
+ rename_keys.append((f"{prefix}image_encoder.transformer.resblocks.{i}.attn.out_proj.bias", f"git.image_encoder.vision_model.encoder.layers.{i}.self_attn.out_proj.bias"))
+ rename_keys.append((f"{prefix}image_encoder.transformer.resblocks.{i}.ln_1.weight", f"git.image_encoder.vision_model.encoder.layers.{i}.layer_norm1.weight"))
+ rename_keys.append((f"{prefix}image_encoder.transformer.resblocks.{i}.ln_1.bias", f"git.image_encoder.vision_model.encoder.layers.{i}.layer_norm1.bias"))
+ rename_keys.append((f"{prefix}image_encoder.transformer.resblocks.{i}.mlp.c_fc.weight", f"git.image_encoder.vision_model.encoder.layers.{i}.mlp.fc1.weight"))
+ rename_keys.append((f"{prefix}image_encoder.transformer.resblocks.{i}.mlp.c_fc.bias", f"git.image_encoder.vision_model.encoder.layers.{i}.mlp.fc1.bias"))
+ rename_keys.append((f"{prefix}image_encoder.transformer.resblocks.{i}.mlp.c_proj.weight", f"git.image_encoder.vision_model.encoder.layers.{i}.mlp.fc2.weight"))
+ rename_keys.append((f"{prefix}image_encoder.transformer.resblocks.{i}.mlp.c_proj.bias", f"git.image_encoder.vision_model.encoder.layers.{i}.mlp.fc2.bias"))
+ rename_keys.append((f"{prefix}image_encoder.transformer.resblocks.{i}.ln_2.weight", f"git.image_encoder.vision_model.encoder.layers.{i}.layer_norm2.weight"))
+ rename_keys.append((f"{prefix}image_encoder.transformer.resblocks.{i}.ln_2.bias", f"git.image_encoder.vision_model.encoder.layers.{i}.layer_norm2.bias"))
+ # fmt: on
+
+ # text decoder
+ # fmt: off
+ rename_keys.append((f"{prefix}textual.embedding.words.weight", "git.embeddings.word_embeddings.weight"))
+ rename_keys.append((f"{prefix}textual.embedding.positions.weight", "git.embeddings.position_embeddings.weight"))
+ rename_keys.append((f"{prefix}textual.visual_projection.0.weight", "git.visual_projection.visual_projection.0.weight"))
+ rename_keys.append((f"{prefix}textual.visual_projection.0.bias", "git.visual_projection.visual_projection.0.bias"))
+ rename_keys.append((f"{prefix}textual.visual_projection.1.weight", "git.visual_projection.visual_projection.1.weight"))
+ rename_keys.append((f"{prefix}textual.visual_projection.1.bias", "git.visual_projection.visual_projection.1.bias"))
+
+ rename_keys.append((f"{prefix}textual.embedding.layer_norm.weight", "git.embeddings.LayerNorm.weight"))
+ rename_keys.append((f"{prefix}textual.embedding.layer_norm.bias", "git.embeddings.LayerNorm.bias"))
+ rename_keys.append((f"{prefix}textual.output.weight", "output.weight"))
+ rename_keys.append((f"{prefix}textual.output.bias", "output.bias"))
+ for i in range(config.num_hidden_layers):
+ rename_keys.append((f"{prefix}textual.transformer.encoder.layer.{i}.attention.self.query.weight", f"git.encoder.layer.{i}.attention.self.query.weight"))
+ rename_keys.append((f"{prefix}textual.transformer.encoder.layer.{i}.attention.self.query.bias", f"git.encoder.layer.{i}.attention.self.query.bias"))
+ rename_keys.append((f"{prefix}textual.transformer.encoder.layer.{i}.attention.self.key.weight", f"git.encoder.layer.{i}.attention.self.key.weight"))
+ rename_keys.append((f"{prefix}textual.transformer.encoder.layer.{i}.attention.self.key.bias", f"git.encoder.layer.{i}.attention.self.key.bias"))
+ rename_keys.append((f"{prefix}textual.transformer.encoder.layer.{i}.attention.self.value.weight", f"git.encoder.layer.{i}.attention.self.value.weight"))
+ rename_keys.append((f"{prefix}textual.transformer.encoder.layer.{i}.attention.self.value.bias", f"git.encoder.layer.{i}.attention.self.value.bias"))
+ rename_keys.append((f"{prefix}textual.transformer.encoder.layer.{i}.attention.output.dense.weight", f"git.encoder.layer.{i}.attention.output.dense.weight"))
+ rename_keys.append((f"{prefix}textual.transformer.encoder.layer.{i}.attention.output.dense.bias", f"git.encoder.layer.{i}.attention.output.dense.bias"))
+ rename_keys.append((f"{prefix}textual.transformer.encoder.layer.{i}.attention.output.LayerNorm.weight", f"git.encoder.layer.{i}.attention.output.LayerNorm.weight"))
+ rename_keys.append((f"{prefix}textual.transformer.encoder.layer.{i}.attention.output.LayerNorm.bias", f"git.encoder.layer.{i}.attention.output.LayerNorm.bias"))
+ rename_keys.append((f"{prefix}textual.transformer.encoder.layer.{i}.intermediate.dense.weight", f"git.encoder.layer.{i}.intermediate.dense.weight"))
+ rename_keys.append((f"{prefix}textual.transformer.encoder.layer.{i}.intermediate.dense.bias", f"git.encoder.layer.{i}.intermediate.dense.bias"))
+ rename_keys.append((f"{prefix}textual.transformer.encoder.layer.{i}.output.dense.weight", f"git.encoder.layer.{i}.output.dense.weight"))
+ rename_keys.append((f"{prefix}textual.transformer.encoder.layer.{i}.output.dense.bias", f"git.encoder.layer.{i}.output.dense.bias"))
+ rename_keys.append((f"{prefix}textual.transformer.encoder.layer.{i}.output.LayerNorm.weight", f"git.encoder.layer.{i}.output.LayerNorm.weight"))
+ rename_keys.append((f"{prefix}textual.transformer.encoder.layer.{i}.output.LayerNorm.bias", f"git.encoder.layer.{i}.output.LayerNorm.bias"))
+ # fmt: on
+
+ if config.num_image_with_embedding is not None:
+ rename_keys.append(("img_temperal_embedding.0", "git.img_temperal_embedding.0"))
+ rename_keys.append(("img_temperal_embedding.1", "git.img_temperal_embedding.1"))
+ rename_keys.append(("img_temperal_embedding.2", "git.img_temperal_embedding.2"))
+ rename_keys.append(("img_temperal_embedding.3", "git.img_temperal_embedding.3"))
+ rename_keys.append(("img_temperal_embedding.4", "git.img_temperal_embedding.4"))
+ rename_keys.append(("img_temperal_embedding.5", "git.img_temperal_embedding.5"))
+
+ return rename_keys
+
+
+def rename_key(dct, old, new):
+ val = dct.pop(old)
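+ # the original checkpoint stores the visual projection as (hidden_size, output_dim), so it is transposed here to the usual (out_features, in_features) weight layout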
+ dct[new] = val.T if "image_encoder.visual_projection" in new else val
+
+
+# we split up the matrix of each CLIP encoder layer into queries, keys and values
+def read_in_q_k_v(state_dict, config, prefix=""):
+ dim = config.vision_config.hidden_size
+ for i in range(config.vision_config.num_hidden_layers):
+ # read in weights + bias of input projection layer (in the original implementation, this is a single matrix + bias)
+ in_proj_weight = state_dict.pop(f"{prefix}image_encoder.transformer.resblocks.{i}.attn.in_proj_weight")
+ in_proj_bias = state_dict.pop(f"{prefix}image_encoder.transformer.resblocks.{i}.attn.in_proj_bias")
+ # next, add query, keys and values (in that order) to the state dict
+ state_dict[f"git.image_encoder.vision_model.encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[
+ :dim, :
+ ]
+ state_dict[f"git.image_encoder.vision_model.encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:dim]
+ state_dict[f"git.image_encoder.vision_model.encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[
+ dim : dim * 2, :
+ ]
+ state_dict[f"git.image_encoder.vision_model.encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[
+ dim : dim * 2
+ ]
+ state_dict[f"git.image_encoder.vision_model.encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[
+ -dim:, :
+ ]
+ state_dict[f"git.image_encoder.vision_model.encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-dim:]
+
+
+# We will verify our results on an image
+def prepare_img(model_name):
+ if "textvqa" in model_name:
+ filepath = hf_hub_download(repo_id="nielsr/textvqa-sample", filename="bus.png", repo_type="dataset")
+ image = Image.open(filepath).convert("RGB")
+ else:
+ url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+ image = Image.open(requests.get(url, stream=True).raw)
+
+ return image
+
+
+def prepare_video():
+ from decord import VideoReader, cpu
+
+ # set seed for reproducibility
+ np.random.seed(0)
+
+ def sample_frame_indices(clip_len, frame_sample_rate, seg_len):
+ """
+ Sample a given number of frame indices from the video.
+
+ Args:
+ clip_len (`int`): Total number of frames to sample.
+ frame_sample_rate (`int`): Sample every n-th frame.
+ seg_len (`int`): Maximum allowed index of sample's last frame.
+
+ Returns:
+ indices (`List[int]`): List of sampled frame indices
+ """
+ converted_len = int(clip_len * frame_sample_rate)
+ end_idx = np.random.randint(converted_len, seg_len)
+ start_idx = end_idx - converted_len
+ indices = np.linspace(start_idx, end_idx, num=clip_len)
+ indices = np.clip(indices, start_idx, end_idx - 1).astype(np.int64)
+ return indices
+
+ # video clip consists of 300 frames (10 seconds at 30 FPS)
+ file_path = hf_hub_download(repo_id="nielsr/video-demo", filename="eating_spaghetti.mp4", repo_type="dataset")
+ videoreader = VideoReader(file_path, num_threads=1, ctx=cpu(0))
+
+ # sample 6 frames
+ videoreader.seek(0)
+ indices = sample_frame_indices(clip_len=6, frame_sample_rate=4, seg_len=len(videoreader))
+ video = videoreader.get_batch(indices).asnumpy()
+
+ return video
+
+
+@torch.no_grad()
+def convert_git_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
+ """
+ Copy/paste/tweak model's weights to our GIT structure.
+ """
+
+ model_name_to_url = {
+ "git-base": "https://publicgit.blob.core.windows.net/data/output/GIT_BASE/snapshot/model.pt",
+ "git-base-coco": "https://publicgit.blob.core.windows.net/data/output/GIT_BASE_COCO/snapshot/model.pt",
+ "git-base-textcaps": "https://publicgit.blob.core.windows.net/data/output/GIT_BASE_TEXTCAPS/snapshot/model.pt",
+ "git-base-vqav2": "https://publicgit.blob.core.windows.net/data/output/GIT_BASE_VQAv2/snapshot/model.pt",
+ "git-base-textvqa": "https://publicgit.blob.core.windows.net/data/output/GIT_BASE_TEXTVQA/snapshot/model.pt", # todo
+ "git-base-vatex": "https://publicgit.blob.core.windows.net/data/output/GIT_BASE_VATEX/snapshot/model.pt",
+ "git-base-msrvtt-qa": (
+ "https://publicgit.blob.core.windows.net/data/output/GIT_BASE_MSRVTT_QA/snapshot/model.pt"
+ ),
+ "git-large": "https://publicgit.blob.core.windows.net/data/output/GIT_LARGE/snapshot/model.pt",
+ "git-large-coco": "https://publicgit.blob.core.windows.net/data/output/GIT_LARGE_COCO/snapshot/model.pt",
+ "git-large-textcaps": (
+ "https://publicgit.blob.core.windows.net/data/output/GIT_LARGE_TEXTCAPS/snapshot/model.pt"
+ ),
+ "git-large-vqav2": "https://publicgit.blob.core.windows.net/data/output/GIT_LARGE_VQAv2/snapshot/model.pt",
+ "git-large-textvqa": "https://publicgit.blob.core.windows.net/data/output/GIT_LARGE_TEXTVQA/snapshot/model.pt",
+ "git-large-vatex": "https://publicgit.blob.core.windows.net/data/output/GIT_LARGE_VATEX/snapshot/model.pt",
+ "git-large-msrvtt-qa": (
+ "https://publicgit.blob.core.windows.net/data/output/GIT_LARGE_MSRVTT_QA/snapshot/model.pt"
+ ),
+ "git-large-r": "https://publicgit.blob.core.windows.net/data/output/GIT_LARGE_R/snapshot/model.pt",
+ "git-large-r-coco": "https://publicgit.blob.core.windows.net/data/output/GIT_LARGE_R_COCO/snapshot/model.pt",
+ "git-large-r-textcaps": (
+ "https://publicgit.blob.core.windows.net/data/output/GIT_LARGE_R_TEXTCAPS/snapshot/model.pt"
+ ),
+ }
+
+ model_name_to_path = {
+ "git-large": "/Users/nielsrogge/Documents/GIT/git_large_model.pt",
+ "git-large-coco": "/Users/nielsrogge/Documents/GIT/git_large_coco_model.pt",
+ "git-large-textcaps": "/Users/nielsrogge/Documents/GIT/git_large_textcaps_model.pt",
+ "git-large-vqav2": "/Users/nielsrogge/Documents/GIT/git_large_vqav2_model.pt",
+ "git-large-textvqa": "/Users/nielsrogge/Documents/GIT/git_large_textvqa_model.pt",
+ }
+
+ # define GIT configuration based on model name
+ config, image_size, is_video = get_git_config(model_name)
+ if "large" in model_name and not is_video and "large-r" not in model_name:
+ # large checkpoints take way too long to download
+ checkpoint_path = model_name_to_path[model_name]
+ state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
+ else:
+ checkpoint_url = model_name_to_url[model_name]
+ state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", file_name=model_name)[
+ "model"
+ ]
+ # rename keys
+ prefix = "module." if model_name == "git-base" else ""
+ rename_keys = create_rename_keys(config, prefix=prefix)
+ for src, dest in rename_keys:
+ rename_key(state_dict, src, dest)
+ read_in_q_k_v(state_dict, config, prefix=prefix)
+
+ # load HuggingFace model
+ model = GitForCausalLM(config)
+ missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
+ model.eval()
+
+ print("Missing keys:", missing_keys)
+ print("Unexpected keys:", unexpected_keys)
+
+ assert missing_keys == ["git.embeddings.position_ids", "git.image_encoder.vision_model.embeddings.position_ids"]
+ assert unexpected_keys == ["git.image_encoder.visual_projection.weight"]
+
+ # verify results
+ image_processor = (
+ VideoMAEImageProcessor(
+ size={"shortest_edge": image_size}, crop_size={"height": image_size, "width": image_size}
+ )
+ if is_video
+ else CLIPImageProcessor(
+ size={"shortest_edge": image_size}, crop_size={"height": image_size, "width": image_size}
+ )
+ )
+ tokenizer = AutoTokenizer.from_pretrained(
+ "google-bert/bert-base-uncased", model_input_names=["input_ids", "attention_mask"]
+ )
+ processor = GitProcessor(tokenizer=tokenizer, image_processor=image_processor)
+
+ if is_video:
+ video = prepare_video()
+ pixel_values = processor(images=list(video), return_tensors="pt").pixel_values
+ else:
+ image = prepare_img(model_name)
+ image_transforms = Compose(
+ [
+ Resize(image_size, interpolation=Image.BICUBIC),
+ CenterCrop(image_size),
+ ToTensor(),
+ Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
+ ]
+ )
+ original_pixel_values = image_transforms(image).unsqueeze(0)
+ pixel_values = processor(images=image, return_tensors="pt").pixel_values
+
+ assert torch.allclose(pixel_values, original_pixel_values)
+
+ input_ids = torch.tensor([[101]])
+ outputs = model(input_ids, pixel_values=pixel_values)
+ logits = outputs.logits
+ print("Logits:", logits[0, -1, :3])
+
+ if model_name == "git-base":
+ expected_slice_logits = torch.tensor([-1.2832, -1.2835, -1.2840])
+ elif model_name == "git-base-coco":
+ expected_slice_logits = torch.tensor([-0.9925, -0.9930, -0.9935])
+ elif model_name == "git-base-textcaps":
+ expected_slice_logits = torch.tensor([-1.2980, -1.2983, -1.2985])
+ elif model_name == "git-base-vqav2":
+ expected_slice_logits = torch.tensor([-0.8570, -0.8568, -0.8561])
+ elif model_name == "git-base-textvqa":
+ expected_slice_logits = torch.tensor([-1.4085, -1.4083, -1.4082])
+ elif model_name == "git-base-vatex":
+ expected_slice_logits = torch.tensor([-1.3451, -1.3447, -1.3447])
+ elif model_name == "git-base-msrvtt-qa":
+ expected_slice_logits = torch.tensor([-0.8554, -0.8550, -0.8540])
+ elif model_name == "git-large":
+ expected_slice_logits = torch.tensor([-1.1708, -1.1707, -1.1705])
+ elif model_name == "git-large-coco":
+ expected_slice_logits = torch.tensor([-1.0425, -1.0423, -1.0422])
+ elif model_name == "git-large-textcaps":
+ expected_slice_logits = torch.tensor([-1.2705, -1.2708, -1.2706])
+ elif model_name == "git-large-vqav2":
+ expected_slice_logits = torch.tensor([-0.7042, -0.7043, -0.7043])
+ elif model_name == "git-large-textvqa":
+ expected_slice_logits = torch.tensor([-0.8590, -0.8592, -0.8590])
+ elif model_name == "git-large-vatex":
+ expected_slice_logits = torch.tensor([-1.0113, -1.0114, -1.0113])
+ elif model_name == "git-large-msrvtt-qa":
+ expected_slice_logits = torch.tensor([0.0130, 0.0134, 0.0131])
+ elif model_name == "git-large-r":
+ expected_slice_logits = torch.tensor([-1.1283, -1.1285, -1.1286])
+ elif model_name == "git-large-r-coco":
+ expected_slice_logits = torch.tensor([-0.9641, -0.9641, -0.9641])
+ elif model_name == "git-large-r-textcaps":
+ expected_slice_logits = torch.tensor([-1.1121, -1.1120, -1.1124])
+
+ assert torch.allclose(logits[0, -1, :3], expected_slice_logits, atol=1e-4)
+ print("Looks ok!")
+
+ prompt = ""
+ if "textvqa" in model_name:
+ prompt = "what does the front of the bus say at the top?"
+ elif "msrvtt-qa" in model_name:
+ prompt = "what does the woman eat?"
+ elif "vqa" in model_name:
+ prompt = "what are the cats doing?"
+ input_ids = tokenizer(prompt, add_special_tokens=False).input_ids
+ input_ids = [processor.tokenizer.cls_token_id] + input_ids
+ input_ids = torch.tensor(input_ids).unsqueeze(0)
+ print("Generating caption...")
+ generated_ids = model.generate(pixel_values=pixel_values, input_ids=input_ids, max_length=50)
+ print("Generated caption:", processor.batch_decode(generated_ids, skip_special_tokens=True))
+
+ if pytorch_dump_folder_path is not None:
+ Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
+ print(f"Saving model and processor of {model_name} to {pytorch_dump_folder_path}")
+ model.save_pretrained(pytorch_dump_folder_path)
+ processor.save_pretrained(pytorch_dump_folder_path)
+
+ if push_to_hub:
+ print(f"Pushing model and processor of {model_name} to the hub...")
+ model.push_to_hub(f"microsoft/{model_name}")
+ processor.push_to_hub(f"microsoft/{model_name}")
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ # Required parameters
+ parser.add_argument(
+ "--model_name",
+ default="git-base",
+ type=str,
+ help="Name of the model you'd like to convert.",
+ )
+ parser.add_argument(
+ "--pytorch_dump_folder_path",
+ default=None,
+ type=str,
+ help="Path to the output PyTorch model directory.",
+ )
+ parser.add_argument(
+ "--push_to_hub",
+ action="store_true",
+ help="Whether to push the model to the hub.",
+ )
+
+ args = parser.parse_args()
+ convert_git_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/git/modeling_git.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/git/modeling_git.py
new file mode 100644
index 0000000000000000000000000000000000000000..c8953d498428eac44d3b4f2d26673557ab48c30a
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/git/modeling_git.py
@@ -0,0 +1,1543 @@
+# coding=utf-8
+# Copyright 2022 Microsoft Research and The HuggingFace Inc. team.
+# All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""PyTorch GIT model."""
+
+
+import math
+from dataclasses import dataclass
+from typing import List, Optional, Tuple, Union
+
+import torch
+import torch.utils.checkpoint
+from torch import nn
+from torch.nn import CrossEntropyLoss
+
+from ...activations import ACT2FN
+from ...file_utils import ModelOutput
+from ...modeling_attn_mask_utils import _prepare_4d_attention_mask
+from ...modeling_outputs import (
+ BaseModelOutput,
+ BaseModelOutputWithPast,
+ BaseModelOutputWithPooling,
+ CausalLMOutputWithPast,
+)
+from ...modeling_utils import PreTrainedModel
+from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
+from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
+from .configuration_git import GitConfig, GitVisionConfig
+
+
+logger = logging.get_logger(__name__)
+
+_CHECKPOINT_FOR_DOC = "microsoft/git-base"
+_CONFIG_FOR_DOC = "GitConfig"
+
+
+from ..deprecated._archive_maps import GIT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
+
+
+@dataclass
+# Copied from transformers.models.clip.modeling_clip.CLIPVisionModelOutput with CLIP->Git
+class GitVisionModelOutput(ModelOutput):
+ """
+ Base class for vision model's outputs that also contains image embeddings of the pooling of the last hidden states.
+
+ Args:
+ image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when model is initialized with `with_projection=True`):
+ The image embeddings obtained by applying the projection layer to the pooler_output.
+ last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
+ Sequence of hidden-states at the output of the last layer of the model.
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ """
+
+ image_embeds: Optional[torch.FloatTensor] = None
+ last_hidden_state: torch.FloatTensor = None
+ hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
+ attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
+
+
+class GitEmbeddings(nn.Module):
+ """Construct the embeddings from word and position embeddings."""
+
+ def __init__(self, config):
+ super().__init__()
+ self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
+ self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
+
+ # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
+ # any TensorFlow checkpoint file
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+ # position_ids (1, len position emb) is contiguous in memory and exported when serialized
+ self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
+ self.register_buffer(
+ "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
+ )
+
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ past_key_values_length: int = 0,
+ ) -> torch.Tensor:
+ if input_ids is not None:
+ input_shape = input_ids.size()
+ else:
+ input_shape = inputs_embeds.size()[:-1]
+
+ seq_length = input_shape[1]
+
+ if position_ids is None:
+ position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length]
+
+ if inputs_embeds is None:
+ embeddings = self.word_embeddings(input_ids)
+ else:
+ embeddings = inputs_embeds
+
+ if self.position_embedding_type == "absolute":
+ position_embeddings = self.position_embeddings(position_ids)
+ embeddings += position_embeddings
+ embeddings = self.LayerNorm(embeddings)
+ embeddings = self.dropout(embeddings)
+ return embeddings
+
+
+class GitSelfAttention(nn.Module):
+ def __init__(self, config, position_embedding_type=None):
+ super().__init__()
+ if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
+ raise ValueError(
+ f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
+ f"heads ({config.num_attention_heads})"
+ )
+
+ self.num_attention_heads = config.num_attention_heads
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
+ self.image_patch_tokens = int((config.vision_config.image_size / config.vision_config.patch_size) ** 2 + 1)
+ if config.num_image_with_embedding is not None:
+ self.image_patch_tokens *= config.num_image_with_embedding
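+ # Illustrative note (not in the original source): with the default GIT vision config
+ # (image_size=224, patch_size=16) this gives (224 / 16) ** 2 + 1 = 197 image tokens per
+ # image (196 patches plus the CLS token), multiplied by the number of frames for video inputs.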
+
+ self.query = nn.Linear(config.hidden_size, self.all_head_size)
+ self.key = nn.Linear(config.hidden_size, self.all_head_size)
+ self.value = nn.Linear(config.hidden_size, self.all_head_size)
+
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
+ self.position_embedding_type = position_embedding_type or getattr(
+ config, "position_embedding_type", "absolute"
+ )
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
+ self.max_position_embeddings = config.max_position_embeddings
+ self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
+
+ def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor:
+ new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
+ x = x.view(new_x_shape)
+ return x.permute(0, 2, 1, 3)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ output_attentions: Optional[bool] = False,
+ pixel_values_present: Optional[bool] = False,
+ ) -> Tuple[torch.Tensor]:
+ mixed_query_layer = self.query(hidden_states)
+
+ cutoff = self.image_patch_tokens if pixel_values_present else 0
+ if past_key_value is not None:
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
+ key_layer = torch.cat([key_layer[:, :, :cutoff, :], past_key_value[0], key_layer[:, :, -1:, :]], dim=2)
+ value_layer = torch.cat(
+ [value_layer[:, :, :cutoff, :], past_key_value[1], value_layer[:, :, -1:, :]], dim=2
+ )
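+ # Resulting key/value layout along the sequence axis during generation:
+ # [ image patch tokens (recomputed) | cached text tokens | current text token ]
+ # Only the text portion is cached further below; the image portion is recomputed each step.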
+ else:
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
+
+ query_layer = self.transpose_for_scores(mixed_query_layer)
+
+ use_cache = past_key_value is not None
+ # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
+ # Further calls to cross_attention layer can then reuse all cross-attention
+ # key/value_states (first "if" case)
+ # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
+ # NOTE: like in other caches, we store the text component. In GIT it means we discard the image component.
+ past_key_value = (
+ key_layer[:, :, cutoff:, :],
+ value_layer[:, :, cutoff:, :],
+ )
+
+ # Take the dot product between "query" and "key" to get the raw attention scores.
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
+
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
+ query_length, key_length = query_layer.shape[2], key_layer.shape[2]
+ if use_cache:
+ position_ids_l = torch.tensor(key_length - 1, dtype=torch.long, device=hidden_states.device).view(
+ -1, 1
+ )
+ else:
+ position_ids_l = torch.arange(query_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
+ position_ids_r = torch.arange(key_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
+ distance = position_ids_l - position_ids_r
+
+ positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
+ positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility
+
+ if self.position_embedding_type == "relative_key":
+ relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
+ attention_scores = attention_scores + relative_position_scores
+ elif self.position_embedding_type == "relative_key_query":
+ relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
+ relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
+ attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
+
+ attention_scores = attention_scores / math.sqrt(self.attention_head_size)
+ if attention_mask is not None:
+ # Apply the attention mask (precomputed for all layers in the GitModel forward() function)
+ attention_scores = attention_scores + attention_mask
+
+ # Normalize the attention scores to probabilities.
+ attention_probs = nn.functional.softmax(attention_scores, dim=-1)
+
+ # This is actually dropping out entire tokens to attend to, which might
+ # seem a bit unusual, but is taken from the original Transformer paper.
+ attention_probs = self.dropout(attention_probs)
+
+ # Mask heads if we want to
+ if head_mask is not None:
+ attention_probs = attention_probs * head_mask
+
+ context_layer = torch.matmul(attention_probs, value_layer)
+
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
+ new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
+ context_layer = context_layer.view(new_context_layer_shape)
+
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
+
+ outputs = outputs + (past_key_value,)
+ return outputs
+
+
+# Copied from transformers.models.bert.modeling_bert.BertSelfOutput
+class GitSelfOutput(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
+ return hidden_states
+
+
+class GitAttention(nn.Module):
+ # Copied from transformers.models.bert.modeling_bert.BertAttention.__init__ with Bert->Git
+ def __init__(self, config, position_embedding_type=None):
+ super().__init__()
+ self.self = GitSelfAttention(config, position_embedding_type=position_embedding_type)
+ self.output = GitSelfOutput(config)
+ self.pruned_heads = set()
+
+ # Copied from transformers.models.bert.modeling_bert.BertAttention.prune_heads
+ def prune_heads(self, heads):
+ if len(heads) == 0:
+ return
+ heads, index = find_pruneable_heads_and_indices(
+ heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
+ )
+
+ # Prune linear layers
+ self.self.query = prune_linear_layer(self.self.query, index)
+ self.self.key = prune_linear_layer(self.self.key, index)
+ self.self.value = prune_linear_layer(self.self.value, index)
+ self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
+
+ # Update hyper params and store pruned heads
+ self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
+ self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
+ self.pruned_heads = self.pruned_heads.union(heads)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ output_attentions: Optional[bool] = False,
+ pixel_values_present: Optional[bool] = False,
+ ) -> Tuple[torch.Tensor]:
+ self_outputs = self.self(
+ hidden_states,
+ attention_mask,
+ head_mask,
+ past_key_value,
+ output_attentions,
+ pixel_values_present,
+ )
+ attention_output = self.output(self_outputs[0], hidden_states)
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
+ return outputs
+
+
+# Copied from transformers.models.bert.modeling_bert.BertIntermediate
+class GitIntermediate(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
+ if isinstance(config.hidden_act, str):
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
+ else:
+ self.intermediate_act_fn = config.hidden_act
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.intermediate_act_fn(hidden_states)
+ return hidden_states
+
+
+# Copied from transformers.models.bert.modeling_bert.BertOutput
+class GitOutput(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
+ return hidden_states
+
+
+class GitLayer(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.chunk_size_feed_forward = config.chunk_size_feed_forward
+ self.seq_len_dim = 1
+ self.attention = GitAttention(config)
+ self.intermediate = GitIntermediate(config)
+ self.output = GitOutput(config)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ output_attentions: Optional[bool] = False,
+ pixel_values_present: Optional[bool] = False,
+ ) -> Tuple[torch.Tensor]:
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
+ self_attention_outputs = self.attention(
+ hidden_states,
+ attention_mask,
+ head_mask,
+ output_attentions=output_attentions,
+ past_key_value=self_attn_past_key_value,
+ pixel_values_present=pixel_values_present,
+ )
+ attention_output = self_attention_outputs[0]
+
+ # if decoder, the last output is tuple of self-attn cache
+ outputs = self_attention_outputs[1:-1]
+ present_key_value = self_attention_outputs[-1]
+
+ layer_output = apply_chunking_to_forward(
+ self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
+ )
+ outputs = (layer_output,) + outputs
+
+ # if decoder, return the attn key/values as the last output
+ outputs = outputs + (present_key_value,)
+
+ return outputs
+
+ def feed_forward_chunk(self, attention_output):
+ intermediate_output = self.intermediate(attention_output)
+ layer_output = self.output(intermediate_output, attention_output)
+ return layer_output
+
+
+class GitEncoder(nn.Module):
+ # Copied from transformers.models.bert.modeling_bert.BertEncoder.__init__ with Bert->Git
+ def __init__(self, config):
+ super().__init__()
+ self.config = config
+ self.layer = nn.ModuleList([GitLayer(config) for _ in range(config.num_hidden_layers)])
+ self.gradient_checkpointing = False
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = False,
+ output_hidden_states: Optional[bool] = False,
+ pixel_values_present: Optional[bool] = False,
+ return_dict: Optional[bool] = True,
+ ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPast]:
+ if self.gradient_checkpointing and self.training:
+ if use_cache:
+ logger.warning_once(
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
+ )
+ use_cache = False
+
+ all_hidden_states = () if output_hidden_states else None
+ all_self_attentions = () if output_attentions else None
+
+ next_decoder_cache = () if use_cache else None
+ for i, layer_module in enumerate(self.layer):
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ layer_head_mask = head_mask[i] if head_mask is not None else None
+ past_key_value = past_key_values[i] if past_key_values is not None else None
+
+ if self.gradient_checkpointing and self.training:
+ layer_outputs = self._gradient_checkpointing_func(
+ layer_module.__call__,
+ hidden_states,
+ attention_mask,
+ layer_head_mask,
+ past_key_value,
+ output_attentions,
+ )
+ else:
+ layer_outputs = layer_module(
+ hidden_states,
+ attention_mask,
+ layer_head_mask,
+ past_key_value,
+ output_attentions,
+ pixel_values_present,
+ )
+
+ hidden_states = layer_outputs[0]
+ if use_cache:
+ next_decoder_cache += (layer_outputs[-1],)
+ if output_attentions:
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
+
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ if not return_dict:
+ return tuple(
+ v
+ for v in [
+ hidden_states,
+ next_decoder_cache,
+ all_hidden_states,
+ all_self_attentions,
+ ]
+ if v is not None
+ )
+ return BaseModelOutputWithPast(
+ last_hidden_state=hidden_states,
+ past_key_values=next_decoder_cache,
+ hidden_states=all_hidden_states,
+ attentions=all_self_attentions,
+ )
+
+
+class GitPreTrainedModel(PreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = GitConfig
+ base_model_prefix = "git"
+ supports_gradient_checkpointing = True
+
+ def _init_weights(self, module):
+ """Initialize the weights"""
+ if isinstance(module, GitVisionEmbeddings):
+ nn.init.normal_(module.class_embedding, mean=0.0, std=self.config.initializer_range)
+ nn.init.normal_(module.patch_embedding.weight, std=self.config.initializer_range)
+ nn.init.normal_(module.position_embedding.weight, std=self.config.initializer_range)
+ if isinstance(module, nn.Linear):
+ # Slightly different from the TF version which uses truncated_normal for initialization
+ # cf https://github.com/pytorch/pytorch/pull/5617
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+ if module.bias is not None:
+ module.bias.data.zero_()
+ elif isinstance(module, nn.Embedding):
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+ if module.padding_idx is not None:
+ module.weight.data[module.padding_idx].zero_()
+ elif isinstance(module, nn.LayerNorm):
+ module.bias.data.zero_()
+ module.weight.data.fill_(1.0)
+
+
+GIT_START_DOCSTRING = r"""
+
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
+ library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
+ etc.)
+
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
+ and behavior.
+
+ Parameters:
+ config ([`GitConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+GIT_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `({0})`):
+ Indices of input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+
+ position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
+ config.max_position_embeddings - 1]`.
+
+ [What are position IDs?](../glossary#position-ids)
+
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
+ Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
+ [`CLIPImageProcessor.__call__`] for details.
+
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
+ model's internal embedding lookup matrix.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+# Copied from transformers.models.clip.modeling_clip.CLIPVisionEmbeddings with CLIP->Git
+class GitVisionEmbeddings(nn.Module):
+ def __init__(self, config: GitVisionConfig):
+ super().__init__()
+ self.config = config
+ self.embed_dim = config.hidden_size
+ self.image_size = config.image_size
+ self.patch_size = config.patch_size
+
+ self.class_embedding = nn.Parameter(torch.randn(self.embed_dim))
+
+ self.patch_embedding = nn.Conv2d(
+ in_channels=config.num_channels,
+ out_channels=self.embed_dim,
+ kernel_size=self.patch_size,
+ stride=self.patch_size,
+ bias=False,
+ )
+
+ self.num_patches = (self.image_size // self.patch_size) ** 2
+ self.num_positions = self.num_patches + 1
+ self.position_embedding = nn.Embedding(self.num_positions, self.embed_dim)
+ self.register_buffer("position_ids", torch.arange(self.num_positions).expand((1, -1)), persistent=False)
+
+ def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor:
+ batch_size = pixel_values.shape[0]
+ target_dtype = self.patch_embedding.weight.dtype
+ patch_embeds = self.patch_embedding(pixel_values.to(dtype=target_dtype)) # shape = [*, width, grid, grid]
+ patch_embeds = patch_embeds.flatten(2).transpose(1, 2)
+
+ class_embeds = self.class_embedding.expand(batch_size, 1, -1)
+ embeddings = torch.cat([class_embeds, patch_embeds], dim=1)
+ embeddings = embeddings + self.position_embedding(self.position_ids)
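+ # embeddings: (batch_size, num_patches + 1, embed_dim), with the class token first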
+ return embeddings
+
+
+# Copied from transformers.models.clip.modeling_clip.CLIPMLP
+class GitVisionMLP(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.config = config
+ self.activation_fn = ACT2FN[config.hidden_act]
+ self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
+ self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.fc1(hidden_states)
+ hidden_states = self.activation_fn(hidden_states)
+ hidden_states = self.fc2(hidden_states)
+ return hidden_states
+
+
+# Copied from transformers.models.clip.modeling_clip.CLIPAttention
+class GitVisionAttention(nn.Module):
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
+
+ def __init__(self, config):
+ super().__init__()
+ self.config = config
+ self.embed_dim = config.hidden_size
+ self.num_heads = config.num_attention_heads
+ self.head_dim = self.embed_dim // self.num_heads
+ if self.head_dim * self.num_heads != self.embed_dim:
+ raise ValueError(
+ f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
+ f" {self.num_heads})."
+ )
+ self.scale = self.head_dim**-0.5
+ self.dropout = config.attention_dropout
+
+ self.k_proj = nn.Linear(self.embed_dim, self.embed_dim)
+ self.v_proj = nn.Linear(self.embed_dim, self.embed_dim)
+ self.q_proj = nn.Linear(self.embed_dim, self.embed_dim)
+ self.out_proj = nn.Linear(self.embed_dim, self.embed_dim)
+
+ def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
+ return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.Tensor] = None,
+ causal_attention_mask: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = False,
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
+ """Input shape: Batch x Time x Channel"""
+
+ bsz, tgt_len, embed_dim = hidden_states.size()
+
+ # get query proj
+ query_states = self.q_proj(hidden_states) * self.scale
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
+
+ proj_shape = (bsz * self.num_heads, -1, self.head_dim)
+ query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
+ key_states = key_states.view(*proj_shape)
+ value_states = value_states.view(*proj_shape)
+
+ src_len = key_states.size(1)
+ attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
+
+ if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
+ raise ValueError(
+ f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
+ f" {attn_weights.size()}"
+ )
+
+ # apply the causal_attention_mask first
+ if causal_attention_mask is not None:
+ if causal_attention_mask.size() != (bsz, 1, tgt_len, src_len):
+ raise ValueError(
+ f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is"
+ f" {causal_attention_mask.size()}"
+ )
+ attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + causal_attention_mask
+ attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
+
+ if attention_mask is not None:
+ if attention_mask.size() != (bsz, 1, tgt_len, src_len):
+ raise ValueError(
+ f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
+ )
+ attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
+ attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
+
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1)
+
+ if output_attentions:
+ # this operation is a bit awkward, but it's required to
+ # make sure that attn_weights keeps its gradient.
+ # In order to do so, attn_weights have to be reshaped
+ # twice and have to be reused in the following
+ attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
+ attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
+ else:
+ attn_weights_reshaped = None
+
+ attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
+
+ attn_output = torch.bmm(attn_probs, value_states)
+
+ if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
+ raise ValueError(
+ f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is"
+ f" {attn_output.size()}"
+ )
+
+ attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
+ attn_output = attn_output.transpose(1, 2)
+ attn_output = attn_output.reshape(bsz, tgt_len, embed_dim)
+
+ attn_output = self.out_proj(attn_output)
+
+ return attn_output, attn_weights_reshaped
+
+
+# Copied from transformers.models.clip.modeling_clip.CLIPEncoderLayer with CLIP->GitVision
+class GitVisionEncoderLayer(nn.Module):
+ def __init__(self, config: GitVisionConfig):
+ super().__init__()
+ self.embed_dim = config.hidden_size
+ self.self_attn = GitVisionAttention(config)
+ self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
+ self.mlp = GitVisionMLP(config)
+ self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: torch.Tensor,
+ causal_attention_mask: torch.Tensor,
+ output_attentions: Optional[bool] = False,
+ ) -> Tuple[torch.FloatTensor]:
+ """
+ Args:
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
+ attention_mask (`torch.FloatTensor`): attention mask of size
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ """
+ residual = hidden_states
+
+ hidden_states = self.layer_norm1(hidden_states)
+ hidden_states, attn_weights = self.self_attn(
+ hidden_states=hidden_states,
+ attention_mask=attention_mask,
+ causal_attention_mask=causal_attention_mask,
+ output_attentions=output_attentions,
+ )
+ hidden_states = residual + hidden_states
+
+ residual = hidden_states
+ hidden_states = self.layer_norm2(hidden_states)
+ hidden_states = self.mlp(hidden_states)
+ hidden_states = residual + hidden_states
+
+ outputs = (hidden_states,)
+
+ if output_attentions:
+ outputs += (attn_weights,)
+
+ return outputs
+
+
+# Copied from transformers.models.clip.modeling_clip.CLIPEncoder with CLIP->GitVision, CLIPConfig
+class GitVisionEncoder(nn.Module):
+ """
+ Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
+ [`GitVisionEncoderLayer`].
+
+ Args:
+ config: GitVisionConfig
+ """
+
+ def __init__(self, config: GitVisionConfig):
+ super().__init__()
+ self.config = config
+ self.layers = nn.ModuleList([GitVisionEncoderLayer(config) for _ in range(config.num_hidden_layers)])
+ self.gradient_checkpointing = False
+
+ def forward(
+ self,
+ inputs_embeds,
+ attention_mask: Optional[torch.Tensor] = None,
+ causal_attention_mask: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, BaseModelOutput]:
+ r"""
+ Args:
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
+ This is useful if you want more control over how to convert `input_ids` indices into associated vectors
+ than the model's internal embedding lookup matrix.
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ causal_attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Causal mask for the text model. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
+ for more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+ """
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ encoder_states = () if output_hidden_states else None
+ all_attentions = () if output_attentions else None
+
+ hidden_states = inputs_embeds
+ for idx, encoder_layer in enumerate(self.layers):
+ if output_hidden_states:
+ encoder_states = encoder_states + (hidden_states,)
+ if self.gradient_checkpointing and self.training:
+ layer_outputs = self._gradient_checkpointing_func(
+ encoder_layer.__call__,
+ hidden_states,
+ attention_mask,
+ causal_attention_mask,
+ output_attentions,
+ )
+ else:
+ layer_outputs = encoder_layer(
+ hidden_states,
+ attention_mask,
+ causal_attention_mask,
+ output_attentions=output_attentions,
+ )
+
+ hidden_states = layer_outputs[0]
+
+ if output_attentions:
+ all_attentions = all_attentions + (layer_outputs[1],)
+
+ if output_hidden_states:
+ encoder_states = encoder_states + (hidden_states,)
+
+ if not return_dict:
+ return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
+ return BaseModelOutput(
+ last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
+ )
+
+
+GIT_VISION_INPUTS_DOCSTRING = r"""
+ Args:
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
+ Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
+ [`AutoImageProcessor`]. See [`CLIPImageProcessor.__call__`] for details.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+class GitVisionTransformer(nn.Module):
+ # Copied from transformers.models.clip.modeling_clip.CLIPVisionTransformer.__init__ with CLIPEncoder->GitVisionEncoder, CLIP->Git
+ def __init__(self, config: GitVisionConfig):
+ super().__init__()
+ self.config = config
+ embed_dim = config.hidden_size
+
+ self.embeddings = GitVisionEmbeddings(config)
+ self.pre_layrnorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
+ self.encoder = GitVisionEncoder(config)
+ self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
+
+ @add_start_docstrings_to_model_forward(GIT_VISION_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=BaseModelOutput, config_class=GitVisionConfig)
+ def forward(
+ self,
+ pixel_values: Optional[torch.FloatTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, BaseModelOutput]:
+ r"""
+ Returns:
+
+ """
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if pixel_values is None:
+ raise ValueError("You have to specify pixel_values")
+
+ hidden_states = self.embeddings(pixel_values)
+ hidden_states = self.pre_layrnorm(hidden_states)
+
+ encoder_outputs = self.encoder(
+ inputs_embeds=hidden_states,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ last_hidden_state = encoder_outputs[0]
+
+ last_hidden_state = self.post_layernorm(last_hidden_state)
+
+ if not return_dict:
+ return (last_hidden_state,) + encoder_outputs[1:]
+
+ return BaseModelOutput(
+ last_hidden_state=last_hidden_state,
+ hidden_states=encoder_outputs.hidden_states,
+ attentions=encoder_outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ """The vision model from CLIP, used in GIT, without any head or projection on top.""",
+ GIT_START_DOCSTRING,
+)
+class GitVisionModel(GitPreTrainedModel):
+ config_class = GitVisionConfig
+ main_input_name = "pixel_values"
+
+ # Copied from transformers.models.clip.modeling_clip.CLIPVisionModel.__init__ with CLIP->Git
+ def __init__(self, config: GitVisionConfig):
+ super().__init__(config)
+ self.vision_model = GitVisionTransformer(config)
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self) -> nn.Module:
+ return self.vision_model.embeddings.patch_embedding
+
+ @add_start_docstrings_to_model_forward(GIT_VISION_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=BaseModelOutput, config_class=GitVisionConfig)
+ def forward(
+ self,
+ pixel_values: Optional[torch.FloatTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, BaseModelOutput]:
+ r"""
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> from PIL import Image
+ >>> import requests
+ >>> from transformers import AutoProcessor, GitVisionModel
+
+ >>> processor = AutoProcessor.from_pretrained("microsoft/git-base")
+ >>> model = GitVisionModel.from_pretrained("microsoft/git-base")
+
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+ >>> image = Image.open(requests.get(url, stream=True).raw)
+
+ >>> inputs = processor(images=image, return_tensors="pt")
+
+ >>> outputs = model(**inputs)
+ >>> last_hidden_state = outputs.last_hidden_state
+ ```"""
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ return self.vision_model(
+ pixel_values=pixel_values,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+
+class GitProjection(nn.Module):
+ def __init__(self, config: GitConfig):
+ super().__init__()
+ self.config = config
+ self.visual_projection = nn.Sequential(
+ nn.Linear(config.vision_config.hidden_size, config.hidden_size),
+ nn.LayerNorm(config.hidden_size, eps=config.vision_config.layer_norm_eps),
+ )
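+ # projects CLIP vision features (vision_config.hidden_size) into the text decoder's hidden_size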
+
+ def forward(self, embeddings: torch.Tensor) -> torch.Tensor:
+ return self.visual_projection(embeddings)
+
+
+@add_start_docstrings(
+ "The bare GIT Model transformer consisting of a CLIP image encoder and text decoder outputting raw hidden-states"
+ " without any specific head on top.",
+ GIT_START_DOCSTRING,
+)
+class GitModel(GitPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+ self.config = config
+
+ self.embeddings = GitEmbeddings(config)
+ self.image_encoder = GitVisionModel(config.vision_config)
+ self.encoder = GitEncoder(config)
+
+ self.visual_projection = GitProjection(config)
+
+ if config.num_image_with_embedding is not None:
+ self.img_temperal_embedding = nn.ParameterList(
+ nn.Parameter(torch.zeros(1, 1, config.vision_config.hidden_size))
+ for _ in range(config.num_image_with_embedding)
+ )
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.embeddings.word_embeddings
+
+ def set_input_embeddings(self, value):
+ self.embeddings.word_embeddings = value
+
+ def _prune_heads(self, heads_to_prune):
+ """
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
+ class PreTrainedModel
+ """
+ for layer, heads in heads_to_prune.items():
+ self.encoder.layer[layer].attention.prune_heads(heads)
+
+ def _generate_future_mask(self, size: int, dtype: torch.dtype, device: torch.device) -> torch.Tensor:
+ # Default mask is for forward direction. Flip for backward direction.
+ mask = torch.triu(torch.ones(size, size, device=device, dtype=dtype), diagonal=1)
+ mask = mask.masked_fill(mask == 1, float("-inf"))
+ return mask
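+ # Illustrative note (not in the original source): for size=3 the mask above is
+ #     [[0., -inf, -inf],
+ #      [0.,   0., -inf],
+ #      [0.,   0.,   0.]]
+ # so position i can only attend to positions <= i.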
+
+ def create_attention_mask(self, tgt, memory, tgt_mask, past_key_values_length, memory_key_padding_mask=None):
+ num_tgt = tgt.shape[1]
+ num_memory = memory.shape[1]
+ device = tgt.device
+ dtype = tgt.dtype
+ top_left = torch.zeros((num_memory, num_memory), device=device, dtype=dtype)
+ top_right = torch.full(
+ (num_memory, num_tgt + past_key_values_length),
+ float("-inf"),
+ device=tgt.device,
+ dtype=dtype,
+ )
+ bottom_left = torch.zeros(
+ (num_tgt, num_memory),
+ dtype=dtype,
+ device=tgt_mask.device,
+ )
+
+ if past_key_values_length > 0:
+ tgt_mask = torch.zeros(
+ (tgt_mask.shape[0], tgt_mask.shape[0] + past_key_values_length),
+ dtype=dtype,
+ device=tgt_mask.device,
+ )
+
+ left = torch.cat((top_left, bottom_left), dim=0)
+ right = torch.cat((top_right, tgt_mask.to(dtype)), dim=0)
+
+ full_attention_mask = torch.cat((left, right), dim=1)[None, :]
+
+ if memory_key_padding_mask is None:
+ memory_key_padding_mask = torch.full((memory.shape[0], memory.shape[1]), fill_value=False, device=device)
+ # an entry of False marks a valid (non-padding) position
+ if memory_key_padding_mask.dtype != torch.bool:
+ raise ValueError("Memory key padding mask must be a boolean tensor.")
+ zero_negative_infinity = torch.zeros_like(memory_key_padding_mask, dtype=tgt.dtype)
+ zero_negative_infinity[memory_key_padding_mask] = float("-inf")
+ full_attention_mask = full_attention_mask.expand(
+ (memory_key_padding_mask.shape[0], num_memory + num_tgt, num_memory + past_key_values_length + num_tgt)
+ )
+ full_attention_mask = full_attention_mask.clone()
+ origin_left = full_attention_mask[:, :, :num_memory]
+ update = zero_negative_infinity[:, None, :]
+ full_attention_mask[:, :, :num_memory] = origin_left + update
+
+ # add axis for multi-head
+ full_attention_mask = full_attention_mask[:, None, :, :]
+
+ return full_attention_mask
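+ # Illustrative note (not in the original source): with num_memory image tokens and num_tgt
+ # text tokens (and no past), the mask built above has the block structure
+ #
+ #                      image columns     text columns
+ #     image rows     [      0         |      -inf         ]
+ #     text rows      [      0         |  causal tgt_mask  ]
+ #
+ # i.e. image tokens attend only to image tokens, while text tokens attend to all image
+ # tokens and causally to earlier text tokens.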
+
+ @add_start_docstrings_to_model_forward(GIT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.Tensor] = None,
+ pixel_values: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPooling]:
+ r"""
+ past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
+
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+ use_cache (`bool`, *optional*):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
+ `past_key_values`).
+
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> from transformers import AutoProcessor, AutoModel
+ >>> import requests
+ >>> from PIL import Image
+
+ >>> processor = AutoProcessor.from_pretrained("microsoft/git-base")
+ >>> model = AutoModel.from_pretrained("microsoft/git-base")
+
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+ >>> image = Image.open(requests.get(url, stream=True).raw)
+
+ >>> text = "this is an image of two cats"
+
+ >>> inputs = processor(text, images=image, return_tensors="pt")
+
+ >>> outputs = model(**inputs)
+ >>> last_hidden_state = outputs.last_hidden_state
+ ```"""
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if input_ids is not None and inputs_embeds is not None:
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
+ elif input_ids is not None:
+ self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
+ input_shape = input_ids.size()
+ elif inputs_embeds is not None:
+ input_shape = inputs_embeds.size()[:-1]
+ else:
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
+
+ seq_length = input_shape[1]
+
+ # past_key_values_length
+ past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
+
+ # Prepare head mask if needed
+ # 1.0 in head_mask indicate we keep the head
+ # attention_probs has shape bsz x n_heads x N x N
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
+
+ projected_visual_features = None
+ if pixel_values is not None:
+ if pixel_values.ndim == 4:
+ # here we assume pixel_values is of shape (batch_size, num_channels, height, width)
+ visual_features = self.image_encoder(pixel_values).last_hidden_state
+
+ elif pixel_values.ndim == 5:
+ # here we assume pixel_values is of shape (batch_size, num_frames, num_channels, height, width)
+ visual_features = []
+ for frame_idx in range(pixel_values.shape[1]):
+ visual_features_frame = self.image_encoder(pixel_values[:, frame_idx, :, :]).last_hidden_state
+ visual_features_frame += self.img_temperal_embedding[frame_idx]
+ visual_features.append(visual_features_frame)
+
+ # finally, concatenate all features along sequence dimension
+ visual_features = torch.cat(visual_features, dim=1)
+
+ else:
+ raise ValueError("pixel_values must be of rank 4 or 5")
+
+ projected_visual_features = self.visual_projection(visual_features)
+
+ embedding_output = self.embeddings(
+ input_ids=input_ids,
+ position_ids=position_ids,
+ inputs_embeds=inputs_embeds,
+ past_key_values_length=past_key_values_length,
+ )
+
+ if projected_visual_features is None:
+ projected_visual_features = torch.zeros(
+ (embedding_output.shape[0], 0, embedding_output.shape[2]),
+ dtype=embedding_output.dtype,
+ device=embedding_output.device,
+ )
+
+ # Repeat visual features to match embedding batch size.
+ projected_visual_features = projected_visual_features.repeat(
+ embedding_output.size(0) // projected_visual_features.size(0), 1, 1
+ )
+
+ # concatenate patch token and text token embeddings
+ hidden_states = torch.cat((projected_visual_features, embedding_output), dim=1)
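+ # hidden_states: (batch_size, num_image_tokens + seq_length, hidden_size); num_image_tokens
+ # is 0 when no pixel_values are provided.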
+
+ # By default, an additive causal mask is created
+ # for masking the future (one direction).
+ tgt_mask = self._generate_future_mask(seq_length, embedding_output.dtype, embedding_output.device)
+
+ # Create an attention mask of shape (batch_size, 1, tgt_seq_len, src_seq_len)
+ combined_attention_mask = self.create_attention_mask(
+ tgt=embedding_output,
+ memory=projected_visual_features,
+ tgt_mask=tgt_mask,
+ past_key_values_length=past_key_values_length,
+ )
+
+ if attention_mask is not None:
+ # if the user provides an attention mask, we add it to the default one
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
+ expanded_attn_mask = _prepare_4d_attention_mask(
+ attention_mask, embedding_output.dtype, tgt_len=input_shape[-1]
+ ).to(embedding_output.device)
+ if past_key_values_length > 0:
+ expanded_attn_mask = expanded_attn_mask[:, :, -past_key_values_length:, :]
+ else:
+ combined_attention_mask[:, :, -input_shape[1] :, -input_shape[1] :] += expanded_attn_mask
+
+ encoder_outputs = self.encoder(
+ hidden_states,
+ attention_mask=combined_attention_mask,
+ head_mask=head_mask,
+ past_key_values=past_key_values,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ pixel_values_present=pixel_values is not None,
+ )
+ sequence_output = encoder_outputs[0]
+
+ if not return_dict:
+ return (sequence_output,) + encoder_outputs[1:]
+
+ return BaseModelOutputWithPast(
+ last_hidden_state=sequence_output,
+ past_key_values=encoder_outputs.past_key_values,
+ hidden_states=encoder_outputs.hidden_states,
+ attentions=encoder_outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ """GIT Model with a `language modeling` head on top for autoregressive language modeling.""", GIT_START_DOCSTRING
+)
+class GitForCausalLM(GitPreTrainedModel):
+ _tied_weights_keys = ["output.weight"]
+
+ def __init__(self, config):
+ super().__init__(config)
+
+ self.git = GitModel(config)
+ self.output = nn.Linear(config.hidden_size, config.vocab_size)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_output_embeddings(self):
+ return self.output
+
+ def set_output_embeddings(self, new_embeddings):
+ self.output = new_embeddings
+
+ @add_start_docstrings_to_model_forward(GIT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.Tensor] = None,
+ pixel_values: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ labels: Optional[torch.Tensor] = None,
+ past_key_values: Optional[List[torch.Tensor]] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple[torch.Tensor], CausalLMOutputWithPast]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
+ `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100` are
+ ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
+ past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
+
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+ use_cache (`bool`, *optional*):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
+ `past_key_values`).
+
+ Returns:
+
+ Examples:
+
+ Image captioning example:
+
+ ```python
+ >>> from transformers import AutoProcessor, AutoModelForCausalLM
+ >>> import requests
+ >>> from PIL import Image
+
+ >>> processor = AutoProcessor.from_pretrained("microsoft/git-base-coco")
+ >>> model = AutoModelForCausalLM.from_pretrained("microsoft/git-base-coco")
+
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+ >>> image = Image.open(requests.get(url, stream=True).raw)
+
+ >>> pixel_values = processor(images=image, return_tensors="pt").pixel_values
+
+ >>> generated_ids = model.generate(pixel_values=pixel_values, max_length=50)
+ >>> generated_caption = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
+ >>> print(generated_caption)
+ two cats sleeping on a pink blanket next to remotes.
+ ```
+
+ Visual question answering (VQA) example:
+
+ ```python
+ >>> from transformers import AutoProcessor, AutoModelForCausalLM
+ >>> from huggingface_hub import hf_hub_download
+ >>> from PIL import Image
+ >>> import torch
+
+ >>> processor = AutoProcessor.from_pretrained("microsoft/git-base-textvqa")
+ >>> model = AutoModelForCausalLM.from_pretrained("microsoft/git-base-textvqa")
+
+ >>> file_path = hf_hub_download(repo_id="nielsr/textvqa-sample", filename="bus.png", repo_type="dataset")
+ >>> image = Image.open(file_path).convert("RGB")
+
+ >>> pixel_values = processor(images=image, return_tensors="pt").pixel_values
+
+ >>> question = "what does the front of the bus say at the top?"
+
+ >>> input_ids = processor(text=question, add_special_tokens=False).input_ids
+ >>> input_ids = [processor.tokenizer.cls_token_id] + input_ids
+ >>> input_ids = torch.tensor(input_ids).unsqueeze(0)
+
+ >>> generated_ids = model.generate(pixel_values=pixel_values, input_ids=input_ids, max_length=50)
+ >>> print(processor.batch_decode(generated_ids, skip_special_tokens=True))
+ ['what does the front of the bus say at the top? special']
+ ```
+
+ Video captioning example:
+
+ ```python
+ >>> import av
+ >>> import numpy as np
+ >>> from PIL import Image
+ >>> from huggingface_hub import hf_hub_download
+ >>> from transformers import AutoProcessor, AutoModelForCausalLM
+
+ >>> processor = AutoProcessor.from_pretrained("microsoft/git-base-vatex")
+ >>> model = AutoModelForCausalLM.from_pretrained("microsoft/git-base-vatex")
+
+ >>> # set seed for reproducibility
+ >>> np.random.seed(45)
+
+
+ >>> def read_video_pyav(container, indices):
+ ... '''
+ ... Decode the video with PyAV decoder.
+ ... Args:
+ ... container (`av.container.input.InputContainer`): PyAV container.
+ ... indices (`List[int]`): List of frame indices to decode.
+ ... Returns:
+ ... result (np.ndarray): np array of decoded frames of shape (num_frames, height, width, 3).
+ ... '''
+ ... frames = []
+ ... container.seek(0)
+ ... start_index = indices[0]
+ ... end_index = indices[-1]
+ ... for i, frame in enumerate(container.decode(video=0)):
+ ... if i > end_index:
+ ... break
+ ... if i >= start_index and i in indices:
+ ... frames.append(frame)
+ ... return np.stack([x.to_ndarray(format="rgb24") for x in frames])
+
+
+ >>> def sample_frame_indices(clip_len, frame_sample_rate, seg_len):
+ ... '''
+ ... Sample a given number of frame indices from the video.
+ ... Args:
+ ... clip_len (`int`): Total number of frames to sample.
+ ... frame_sample_rate (`int`): Sample every n-th frame.
+ ... seg_len (`int`): Maximum allowed index of sample's last frame.
+ ... Returns:
+ ... indices (`List[int]`): List of sampled frame indices
+ ... '''
+ ... converted_len = int(clip_len * frame_sample_rate)
+ ... end_idx = np.random.randint(converted_len, seg_len)
+ ... start_idx = end_idx - converted_len
+ ... indices = np.linspace(start_idx, end_idx, num=clip_len)
+ ... indices = np.clip(indices, start_idx, end_idx - 1).astype(np.int64)
+ ... return indices
+
+
+ >>> # load video
+ >>> file_path = hf_hub_download(
+ ... repo_id="nielsr/video-demo", filename="eating_spaghetti.mp4", repo_type="dataset"
+ ... )
+ >>> container = av.open(file_path)
+
+ >>> # sample frames
+ >>> num_frames = model.config.num_image_with_embedding
+ >>> indices = sample_frame_indices(
+ ... clip_len=num_frames, frame_sample_rate=4, seg_len=container.streams.video[0].frames
+ ... )
+ >>> frames = read_video_pyav(container, indices)
+
+ >>> pixel_values = processor(images=list(frames), return_tensors="pt").pixel_values
+
+ >>> generated_ids = model.generate(pixel_values=pixel_values, max_length=50)
+
+ >>> print("Generated caption:", processor.batch_decode(generated_ids, skip_special_tokens=True))
+ Generated caption: ['a woman is sitting at a table and she is talking about the food she is holding.']
+ ```
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+ if labels is not None:
+ use_cache = False
+
+ outputs = self.git(
+ input_ids,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ pixel_values=pixel_values,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ past_key_values=past_key_values,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ sequence_output = outputs[0]
+ logits = self.output(sequence_output)
+
+ loss = None
+ if labels is not None:
+ # we are doing next-token prediction; shift prediction scores and input ids by one
+ num_image_tokens = self.git.encoder.layer[0].attention.self.image_patch_tokens
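+ # logits has shape (batch_size, num_image_tokens + seq_length, vocab_size); the image
+ # positions carry no text labels, so they are skipped before shifting by one.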
+ shifted_logits = logits[:, num_image_tokens:-1, :].contiguous()
+ labels = labels[:, 1:].contiguous()
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(shifted_logits.view(-1, self.config.vocab_size), labels.view(-1))
+
+ if not return_dict:
+ output = (logits,) + outputs[1:]
+ return ((loss,) + output) if loss is not None else output
+
+ return CausalLMOutputWithPast(
+ loss=loss,
+ logits=logits,
+ past_key_values=outputs.past_key_values,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+ def prepare_inputs_for_generation(
+ self, input_ids, past_key_values=None, attention_mask=None, use_cache=None, **kwargs
+ ):
+ # cut decoder_input_ids if past_key_values is used
+ if past_key_values is not None:
+ input_ids = input_ids[:, -1:]
+
+ # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
+ input_shape = input_ids.shape
+ if attention_mask is None:
+ attention_mask = input_ids.new_ones(input_shape)
+
+ return {
+ "input_ids": input_ids,
+ "attention_mask": attention_mask,
+ "pixel_values": kwargs.get("pixel_values", None),
+ "past_key_values": past_key_values,
+ "use_cache": use_cache,
+ }
+
+ def _reorder_cache(self, past_key_values, beam_idx):
+ reordered_past = ()
+ for layer_past in past_key_values:
+ reordered_past += (
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
+ )
+ return reordered_past
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/models/git/processing_git.py b/llmeval-env/lib/python3.10/site-packages/transformers/models/git/processing_git.py
new file mode 100644
index 0000000000000000000000000000000000000000..79f26f3bf24b14116c43a66b7e9ebb682ee667a2
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/models/git/processing_git.py
@@ -0,0 +1,113 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Image/Text processor class for GIT
+"""
+
+from ...processing_utils import ProcessorMixin
+from ...tokenization_utils_base import BatchEncoding
+
+
+class GitProcessor(ProcessorMixin):
+ r"""
+ Constructs a GIT processor which wraps a CLIP image processor and a BERT tokenizer into a single processor.
+
+ [`GitProcessor`] offers all the functionalities of [`CLIPImageProcessor`] and [`BertTokenizerFast`]. See the
+ [`~GitProcessor.__call__`] and [`~GitProcessor.decode`] for more information.
+
+ Args:
+ image_processor ([`AutoImageProcessor`]):
+ The image processor is a required input.
+ tokenizer ([`AutoTokenizer`]):
+ The tokenizer is a required input.
+ """
+
+ attributes = ["image_processor", "tokenizer"]
+ image_processor_class = "AutoImageProcessor"
+ tokenizer_class = "AutoTokenizer"
+
+ def __init__(self, image_processor, tokenizer):
+ super().__init__(image_processor, tokenizer)
+ self.current_processor = self.image_processor
+
+ def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
+ """
+ Main method to prepare one or several sequence(s) and image(s) for the model. This method forwards the `text`
+ and `kwargs` arguments to BertTokenizerFast's [`~BertTokenizerFast.__call__`] if `text` is not `None` to encode
+ the text. To prepare the image(s), this method forwards the `images` and `kwargs` arguments to
+ CLIPImageProcessor's [`~CLIPImageProcessor.__call__`] if `images` is not `None`. Please refer to the docstring
+ of the above two methods for more information.
+
+ Args:
+ text (`str`, `List[str]`, `List[List[str]]`):
+ The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
+ (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
+ `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
+ images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`, `List[torch.Tensor]`):
+ The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
+ tensor. Both channels-first and channels-last formats are supported.
+
+ return_tensors (`str` or [`~utils.TensorType`], *optional*):
+ If set, will return tensors of a particular framework. Acceptable values are:
+
+ - `'tf'`: Return TensorFlow `tf.constant` objects.
+ - `'pt'`: Return PyTorch `torch.Tensor` objects.
+ - `'np'`: Return NumPy `np.ndarray` objects.
+ - `'jax'`: Return JAX `jnp.ndarray` objects.
+
+ Returns:
+ [`BatchEncoding`]: A [`BatchEncoding`] with the following fields:
+
+ - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
+ - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
+ `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
+ `None`).
+ - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.
+ """
+
+ if text is None and images is None:
+ raise ValueError("You have to specify either text or images. Both cannot be None.")
+
+ if text is not None:
+ encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
+
+ if images is not None:
+ image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
+
+ if text is not None and images is not None:
+ encoding["pixel_values"] = image_features.pixel_values
+ return encoding
+ elif text is not None:
+ return encoding
+ else:
+ return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
+
+ def batch_decode(self, *args, **kwargs):
+ """
+ This method forwards all its arguments to BertTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please
+ refer to the docstring of this method for more information.
+ """
+ return self.tokenizer.batch_decode(*args, **kwargs)
+
+ def decode(self, *args, **kwargs):
+ """
+ This method forwards all its arguments to BertTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to
+ the docstring of this method for more information.
+ """
+ return self.tokenizer.decode(*args, **kwargs)
+
+ @property
+ def model_input_names(self):
+ return ["input_ids", "attention_mask", "pixel_values"]
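
A short usage sketch for the processor above (the checkpoint name and image URL are assumptions for illustration; any GIT checkpoint that ships a processor config should behave the same way):

    import requests
    from PIL import Image
    from transformers import GitProcessor

    processor = GitProcessor.from_pretrained("microsoft/git-base")
    image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)

    # text goes through the BERT tokenizer, images through the CLIP image processor
    inputs = processor(text="two cats sleeping on a couch", images=image, return_tensors="pt")
    print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']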
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/quantizers/__init__.py b/llmeval-env/lib/python3.10/site-packages/transformers/quantizers/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..3409af4cd78c62daa258303472fa5a6345b090d5
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/quantizers/__init__.py
@@ -0,0 +1,15 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from .auto import AutoHfQuantizer, AutoQuantizationConfig
+from .base import HfQuantizer
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/quantizers/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/quantizers/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c4b1b4b714a79ef032e5352c214052e9c2221686
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/quantizers/__pycache__/__init__.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/quantizers/__pycache__/auto.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/quantizers/__pycache__/auto.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4ed3287e6ac9c191f1b5734ea6aea2acdf414a5e
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/quantizers/__pycache__/auto.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/quantizers/__pycache__/base.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/quantizers/__pycache__/base.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2c0579b67b9776d6b67be2707a5be4b9b8fae89a
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/quantizers/__pycache__/base.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/quantizers/__pycache__/quantizer_aqlm.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/quantizers/__pycache__/quantizer_aqlm.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f3687d525e647421a22bf490d3eb4badb7efe1f5
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/quantizers/__pycache__/quantizer_aqlm.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/quantizers/__pycache__/quantizer_awq.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/quantizers/__pycache__/quantizer_awq.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d91b2b9ca350d4b8e92099e10f7a5f27242b815c
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/quantizers/__pycache__/quantizer_awq.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/quantizers/__pycache__/quantizer_bnb_4bit.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/quantizers/__pycache__/quantizer_bnb_4bit.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..fb62be730883cf1414882c942ed6f75aa79e4747
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/quantizers/__pycache__/quantizer_bnb_4bit.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/quantizers/__pycache__/quantizer_bnb_8bit.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/quantizers/__pycache__/quantizer_bnb_8bit.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7a8068688b1a1527c6005c5822f01fb8d31ba984
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/quantizers/__pycache__/quantizer_bnb_8bit.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/quantizers/__pycache__/quantizer_gptq.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/quantizers/__pycache__/quantizer_gptq.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e9110daa6d52121b57ff7475da38e03d6390b0ea
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/quantizers/__pycache__/quantizer_gptq.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/quantizers/__pycache__/quantizer_quanto.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/quantizers/__pycache__/quantizer_quanto.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..34f99c4b185aea4fee4d789b4a6dd910a8d7ef51
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/quantizers/__pycache__/quantizer_quanto.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/quantizers/__pycache__/quantizers_utils.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/quantizers/__pycache__/quantizers_utils.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2c2bb20bf482b8503d3a6bb056352520ae017919
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/quantizers/__pycache__/quantizers_utils.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/quantizers/auto.py b/llmeval-env/lib/python3.10/site-packages/transformers/quantizers/auto.py
new file mode 100644
index 0000000000000000000000000000000000000000..616e206a45b596caacca8cfacc417de7e9a3cd8e
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/quantizers/auto.py
@@ -0,0 +1,161 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import warnings
+from typing import Dict, Optional, Union
+
+from ..models.auto.configuration_auto import AutoConfig
+from ..utils.quantization_config import (
+ AqlmConfig,
+ AwqConfig,
+ BitsAndBytesConfig,
+ GPTQConfig,
+ QuantizationConfigMixin,
+ QuantizationMethod,
+ QuantoConfig,
+)
+from .quantizer_aqlm import AqlmHfQuantizer
+from .quantizer_awq import AwqQuantizer
+from .quantizer_bnb_4bit import Bnb4BitHfQuantizer
+from .quantizer_bnb_8bit import Bnb8BitHfQuantizer
+from .quantizer_gptq import GptqHfQuantizer
+from .quantizer_quanto import QuantoHfQuantizer
+
+
+AUTO_QUANTIZER_MAPPING = {
+ "awq": AwqQuantizer,
+ "bitsandbytes_4bit": Bnb4BitHfQuantizer,
+ "bitsandbytes_8bit": Bnb8BitHfQuantizer,
+ "gptq": GptqHfQuantizer,
+ "aqlm": AqlmHfQuantizer,
+ "quanto": QuantoHfQuantizer,
+}
+
+AUTO_QUANTIZATION_CONFIG_MAPPING = {
+ "awq": AwqConfig,
+ "bitsandbytes_4bit": BitsAndBytesConfig,
+ "bitsandbytes_8bit": BitsAndBytesConfig,
+ "gptq": GPTQConfig,
+ "aqlm": AqlmConfig,
+ "quanto": QuantoConfig,
+}
+
+
+class AutoQuantizationConfig:
+ """
+ The Auto-HF quantization config class that takes care of automatically dispatching to the correct
+ quantization config given a quantization config stored in a dictionary.
+ """
+
+ @classmethod
+ def from_dict(cls, quantization_config_dict: Dict):
+ quant_method = quantization_config_dict.get("quant_method", None)
+ # We need special care for bnb models to make sure everything stays backward compatible.
+ if quantization_config_dict.get("load_in_8bit", False) or quantization_config_dict.get("load_in_4bit", False):
+ suffix = "_4bit" if quantization_config_dict.get("load_in_4bit", False) else "_8bit"
+ quant_method = QuantizationMethod.BITS_AND_BYTES + suffix
+ elif quant_method is None:
+ raise ValueError(
+ "The model's quantization config from the arguments has no `quant_method` attribute. Make sure that the model has been correctly quantized"
+ )
+
+ if quant_method not in AUTO_QUANTIZATION_CONFIG_MAPPING.keys():
+ raise ValueError(
+ f"Unknown quantization type, got {quant_method} - supported types are:"
+ f" {list(AUTO_QUANTIZER_MAPPING.keys())}"
+ )
+
+ target_cls = AUTO_QUANTIZATION_CONFIG_MAPPING[quant_method]
+ return target_cls.from_dict(quantization_config_dict)
+
+ @classmethod
+ def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
+ model_config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
+ if getattr(model_config, "quantization_config", None) is None:
+ raise ValueError(
+ f"Did not find a `quantization_config` in {pretrained_model_name_or_path}. Make sure that the model is correctly quantized."
+ )
+ quantization_config_dict = model_config.quantization_config
+ quantization_config = cls.from_dict(quantization_config_dict)
+ # Update with potential kwargs that are passed through from_pretrained.
+ quantization_config.update(kwargs)
+ return quantization_config
+
+
+class AutoHfQuantizer:
+ """
+ The Auto-HF quantizer class that takes care of automatically instantiating the correct
+ `HfQuantizer` given the `QuantizationConfig`.
+ """
+
+ @classmethod
+ def from_config(cls, quantization_config: Union[QuantizationConfigMixin, Dict], **kwargs):
+ # Convert it to a QuantizationConfig if the q_config is a dict
+ if isinstance(quantization_config, dict):
+ quantization_config = AutoQuantizationConfig.from_dict(quantization_config)
+
+ quant_method = quantization_config.quant_method
+
+ # Again, we need special care for bnb as we have a single quantization config
+ # class for both 4-bit and 8-bit quantization
+ if quant_method == QuantizationMethod.BITS_AND_BYTES:
+ if quantization_config.load_in_8bit:
+ quant_method += "_8bit"
+ else:
+ quant_method += "_4bit"
+
+ if quant_method not in AUTO_QUANTIZER_MAPPING.keys():
+ raise ValueError(
+ f"Unknown quantization type, got {quant_method} - supported types are:"
+ f" {list(AUTO_QUANTIZER_MAPPING.keys())}"
+ )
+
+ target_cls = AUTO_QUANTIZER_MAPPING[quant_method]
+ return target_cls(quantization_config, **kwargs)
+
+ @classmethod
+ def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
+ quantization_config = AutoQuantizationConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
+ return cls.from_config(quantization_config)
+
+ @classmethod
+ def merge_quantization_configs(
+ cls,
+ quantization_config: Union[dict, QuantizationConfigMixin],
+ quantization_config_from_args: Optional[QuantizationConfigMixin],
+ ):
+ """
+ Handles situations where both a quantization_config from the arguments and a quantization_config from the model config are present.
+ """
+ if quantization_config_from_args is not None:
+ warning_msg = (
+ "You passed `quantization_config` or equivalent parameters to `from_pretrained` but the model you're loading"
+ " already has a `quantization_config` attribute. The `quantization_config` from the model will be used."
+ )
+ else:
+ warning_msg = ""
+
+ if isinstance(quantization_config, dict):
+ quantization_config = AutoQuantizationConfig.from_dict(quantization_config)
+
+ if isinstance(quantization_config, (GPTQConfig, AwqConfig)) and quantization_config_from_args is not None:
+ # special case for GPTQ / AWQ config collision
+ loading_attr_dict = quantization_config_from_args.get_loading_attributes()
+ for attr, val in loading_attr_dict.items():
+ setattr(quantization_config, attr, val)
+ warning_msg += f"However, loading attributes (e.g. {list(loading_attr_dict.keys())}) will be overwritten with the one you passed to `from_pretrained`. The rest will be ignored."
+
+ if warning_msg != "":
+ warnings.warn(warning_msg)
+
+ return quantization_config
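
A small sketch of the dispatch logic above (illustration only; the dict mirrors what a quantized checkpoint's `config.json` may contain, `bitsandbytes` is assumed to be installed, and no GPU is needed just to build these objects):

    from transformers import BitsAndBytesConfig
    from transformers.quantizers import AutoHfQuantizer, AutoQuantizationConfig

    # A legacy-style bnb dict without `quant_method` is resolved through the
    # `load_in_4bit`/`load_in_8bit` backward-compatibility branch of `from_dict`.
    config = AutoQuantizationConfig.from_dict({"load_in_4bit": True, "bnb_4bit_quant_type": "nf4"})
    print(type(config).__name__)  # BitsAndBytesConfig

    # `from_config` maps the "bitsandbytes_4bit" key to the corresponding quantizer class.
    quantizer = AutoHfQuantizer.from_config(BitsAndBytesConfig(load_in_4bit=True), pre_quantized=False)
    print(type(quantizer).__name__)  # Bnb4BitHfQuantizer

Instantiating the quantizer does not quantize anything by itself; the heavy lifting happens inside `PreTrainedModel.from_pretrained`, which calls the pre/post-processing hooks defined in `base.py` below.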
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/quantizers/base.py b/llmeval-env/lib/python3.10/site-packages/transformers/quantizers/base.py
new file mode 100644
index 0000000000000000000000000000000000000000..226995eea0ca5bcbbff371169a0dcb48ef036c77
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/quantizers/base.py
@@ -0,0 +1,213 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from abc import ABC, abstractmethod
+from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union
+
+from ..utils import is_torch_available
+from ..utils.quantization_config import QuantizationConfigMixin
+
+
+if TYPE_CHECKING:
+ from ..modeling_utils import PreTrainedModel
+
+if is_torch_available():
+ import torch
+
+
+class HfQuantizer(ABC):
+ """
+ Abstract base class for the HuggingFace quantizers. For now, it supports quantizing HF transformers models for inference.
+ This class is used only by `transformers.PreTrainedModel.from_pretrained` and cannot be easily used outside the scope of that method
+ yet.
+
+ Attributes:
+ quantization_config (`transformers.utils.quantization_config.QuantizationConfigMixin`):
+ The quantization config that defines the quantization parameters of your model that you want to quantize.
+ modules_to_not_convert (`List[str]`, *optional*):
+ The list of module names to not convert when quantizing the model.
+ required_packages (`List[str]`, *optional*):
+ The list of required pip packages to install prior to using the quantizer
+ requires_calibration (`bool`):
+ Whether the quantization method requires calibrating the model before using it.
+ requires_parameters_quantization (`bool`):
+ Whether the quantization method requires creating a new Parameter. For example, for bitsandbytes, it is
+ required to create a new xxxParameter in order to properly quantize the model.
+ """
+
+ requires_calibration = False
+ required_packages = None
+ requires_parameters_quantization = False
+
+ def __init__(self, quantization_config: QuantizationConfigMixin, **kwargs):
+ self.quantization_config = quantization_config
+
+ # -- Handle extra kwargs below --
+ self.modules_to_not_convert = kwargs.pop("modules_to_not_convert", [])
+ self.pre_quantized = kwargs.pop("pre_quantized", True)
+
+ if not self.pre_quantized and self.requires_calibration:
+ raise ValueError(
+ f"The quantization method {quantization_config.quant_method} requires the model to be pre-quantized."
+ f" You explicitly passed `pre_quantized=False`, meaning your model weights are not quantized. Make sure to "
+ f"pass `pre_quantized=True` for this quantization method."
+ )
+
+ def update_torch_dtype(self, torch_dtype: "torch.dtype") -> "torch.dtype":
+ """
+ Some quantization methods require the dtype of the model to be explicitly set to a
+ target dtype. Override this method if you want to make sure that behavior is
+ enforced.
+
+ Args:
+ torch_dtype (`torch.dtype`):
+ The input dtype that is passed in `from_pretrained`
+ """
+ return torch_dtype
+
+ def update_device_map(self, device_map: Optional[Dict[str, Any]]) -> Optional[Dict[str, Any]]:
+ """
+ Override this method if you want to replace the existing device map with a new
+ one. E.g. for bitsandbytes, since `accelerate` is a hard requirement, if no device_map is
+ passed, the device_map is set to `"auto"`.
+
+ Args:
+ device_map (`Union[dict, str]`, *optional*):
+ The device_map that is passed through the `from_pretrained` method.
+ """
+ return device_map
+
+ def adjust_target_dtype(self, torch_dtype: "torch.dtype") -> "torch.dtype":
+ """
+ Override this method if you want to adjust the `target_dtype` variable used in `from_pretrained`
+ to compute the device_map in case the device_map is a `str`. E.g. for bitsandbytes we force-set `target_dtype`
+ to `torch.int8` and for 4-bit we pass a custom enum `accelerate.CustomDtype.INT4`.
+
+ Args:
+ torch_dtype (`torch.dtype`, *optional*):
+ The torch_dtype that is used to compute the device_map.
+ """
+ return torch_dtype
+
+ def update_missing_keys(self, model, missing_keys: List[str], prefix: str) -> List[str]:
+ """
+ Override this method if you want to adjust the `missing_keys`.
+
+ Args:
+ missing_keys (`List[str]`, *optional*):
+ The list of missing keys in the checkpoint compared to the state dict of the model
+ """
+ return missing_keys
+
+ def get_special_dtypes_update(self, model, torch_dtype: "torch.dtype") -> Dict[str, "torch.dtype"]:
+ """
+ Returns dtypes for modules that are not quantized - used to compute the device_map in case
+ a `str` is passed as the device_map. The method relies on the `modules_to_not_convert` attribute that is modified
+ in `_process_model_before_weight_loading`.
+
+ Args:
+ model (`~transformers.PreTrainedModel`):
+ The model to quantize
+ torch_dtype (`torch.dtype`):
+ The dtype passed in `from_pretrained` method.
+ """
+
+ return {
+ name: torch_dtype
+ for name, _ in model.named_parameters()
+ if any(m in name for m in self.modules_to_not_convert)
+ }
+
+ def adjust_max_memory(self, max_memory: Dict[str, Union[int, str]]) -> Dict[str, Union[int, str]]:
+ """adjust max_memory argument for infer_auto_device_map() if extra memory is needed for quantization"""
+ return max_memory
+
+ def check_quantized_param(
+ self,
+ model: "PreTrainedModel",
+ param_value: "torch.Tensor",
+ param_name: str,
+ state_dict: Dict[str, Any],
+ **kwargs,
+ ) -> bool:
+ """
+ Checks if a loaded state_dict component is part of a quantized parameter (plus some validation); only defined if
+ `requires_parameters_quantization == True`, i.e. for quantization methods that require creating new parameters
+ for quantization.
+ """
+ return False
+
+ def create_quantized_param(self, *args, **kwargs) -> "torch.nn.Parameter":
+ """
+ Takes the needed components from the state_dict and creates a quantized parameter; only applicable if
+ requires_parameters_quantization == True
+ """
+ if not self.requires_parameters_quantization:
+ raise AttributeError(
+ f"`.create_quantized_param()` method is not supported by quantizer class {self.__class__.__name__}."
+ )
+
+ def validate_environment(self, *args, **kwargs):
+ """
+ This method is used to check for potential conflicts with the arguments that are
+ passed in `from_pretrained`. You need to define it for all future quantizers that are integrated with transformers.
+ If no explicit checks are needed, simply return nothing.
+ """
+ return
+
+ def preprocess_model(self, model: "PreTrainedModel", **kwargs):
+ """
+ Sets model attributes and/or converts the model before weight loading. At this point
+ the model should be initialized on the meta device so you can freely manipulate the skeleton
+ of the model in order to replace modules in-place. Make sure to override the abstract method `_process_model_before_weight_loading`.
+
+ Args:
+ model (`~transformers.PreTrainedModel`):
+ The model to quantize
+ kwargs (`dict`, *optional*):
+ The keyword arguments that are passed along `_process_model_before_weight_loading`.
+ """
+ model.is_quantized = True
+ model.quantization_method = self.quantization_config.quant_method
+ return self._process_model_before_weight_loading(model, **kwargs)
+
+ def postprocess_model(self, model: "PreTrainedModel", **kwargs):
+ """
+ Post-process the model after weight loading.
+ Make sure to override the abstract method `_process_model_after_weight_loading`.
+
+ Args:
+ model (`~transformers.PreTrainedModel`):
+ The model to quantize
+ kwargs (`dict`, *optional*):
+ The keyword arguments that are passed along `_process_model_after_weight_loading`.
+ """
+ return self._process_model_after_weight_loading(model, **kwargs)
+
+ @abstractmethod
+ def _process_model_before_weight_loading(self, model, **kwargs):
+ ...
+
+ @abstractmethod
+ def _process_model_after_weight_loading(self, model, **kwargs):
+ ...
+
+ @property
+ @abstractmethod
+ def is_serializable(self):
+ ...
+
+ @property
+ @abstractmethod
+ def is_trainable(self):
+ ...
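
To make the contract above concrete, here is a minimal, hypothetical subclass sketch (the class name and its no-op behaviour are purely illustrative, not an existing backend); a real quantizer would also be registered in the `AUTO_QUANTIZER_MAPPING`/`AUTO_QUANTIZATION_CONFIG_MAPPING` dictionaries of `auto.py`:

    from transformers.quantizers import HfQuantizer


    class NoOpQuantizer(HfQuantizer):
        """Hypothetical backend that leaves the model untouched (illustration only)."""

        requires_calibration = False
        required_packages = []

        def validate_environment(self, *args, **kwargs):
            return  # nothing to check for this no-op backend

        def _process_model_before_weight_loading(self, model, **kwargs):
            # called by `preprocess_model` while the model skeleton is still on the meta device
            model.config.quantization_config = self.quantization_config
            return model

        def _process_model_after_weight_loading(self, model, **kwargs):
            # called by `postprocess_model` once the real weights have been loaded
            return model

        @property
        def is_serializable(self):
            return True

        @property
        def is_trainable(self):
            return False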
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/quantizers/quantizer_aqlm.py b/llmeval-env/lib/python3.10/site-packages/transformers/quantizers/quantizer_aqlm.py
new file mode 100644
index 0000000000000000000000000000000000000000..530071616115ac9ab58cdcbdfcf38b90589b10c1
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/quantizers/quantizer_aqlm.py
@@ -0,0 +1,98 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import importlib
+from typing import TYPE_CHECKING, Optional
+
+from packaging import version
+
+from .base import HfQuantizer
+
+
+if TYPE_CHECKING:
+ from ..modeling_utils import PreTrainedModel
+
+from ..integrations import replace_with_aqlm_linear
+from ..utils import is_accelerate_available, is_aqlm_available, is_torch_available, logging
+from ..utils.quantization_config import QuantizationConfigMixin
+
+
+if is_torch_available():
+ import torch
+
+logger = logging.get_logger(__name__)
+
+
+class AqlmHfQuantizer(HfQuantizer):
+ """
+ Quantizer of the AQLM method. Enables the loading of prequantized models.
+ """
+
+ requires_calibration = True
+ required_packages = ["aqlm"]
+ optimum_quantizer = None
+
+ def __init__(self, quantization_config: QuantizationConfigMixin, **kwargs):
+ super().__init__(quantization_config, **kwargs)
+ self.quantization_config = quantization_config
+
+ def validate_environment(self, *args, **kwargs):
+ if not is_accelerate_available():
+ raise ImportError("Using `aqlm` quantization requires Accelerate: `pip install accelerate`")
+
+ if not is_aqlm_available():
+ raise ImportError("Using `aqlm` quantization requires AQLM: `pip install aqlm[gpu,cpu]`")
+
+ def update_torch_dtype(self, torch_dtype: "torch.dtype") -> "torch.dtype":
+ if torch_dtype is None:
+ if torch.cuda.is_available():
+ torch_dtype = torch.float16
+ logger.info(
+ "CUDA available. Assuming AQLM inference on GPU and loading the model in `torch.float16`. To overwrite it, set `torch_dtype` manually."
+ )
+ else:
+ torch_dtype = torch.float32
+ logger.info(
+ "CUDA is unavailable. Assuming AQLM inference on CPU and loading the model in `torch.float32`. To overwrite it, set `torch_dtype` manually."
+ )
+ return torch_dtype
+
+ def _process_model_before_weight_loading(
+ self,
+ model: "PreTrainedModel",
+ **kwargs,
+ ):
+ replace_with_aqlm_linear(
+ model,
+ quantization_config=self.quantization_config,
+ linear_weights_not_to_quantize=self.quantization_config.linear_weights_not_to_quantize,
+ )
+ model.config.quantization_config = self.quantization_config
+
+ def _process_model_after_weight_loading(self, model: "PreTrainedModel", **kwargs):
+ return model
+
+ @property
+ def is_trainable(self, model: Optional["PreTrainedModel"] = None):
+ aqlm_supports_training = version.parse(importlib.metadata.version("aqlm")) >= version.parse("1.0.2")
+ if aqlm_supports_training:
+ return True
+ else:
+ logger.warning(
+ f"Currently installed `aqlm` version ({importlib.metadata.version('aqlm')}) doesn't support training. If you wish to train a quantized model, please update `aqlm` with `pip install aqlm>=1.0.2`"
+ )
+ return False
+
+ @property
+ def is_serializable(self):
+ return True
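
Because `requires_calibration = True`, AQLM models can only be loaded from an already-quantized checkpoint; a minimal loading sketch (the repository id is a placeholder, and `aqlm` plus `accelerate` must be installed):

    from transformers import AutoModelForCausalLM, AutoTokenizer

    model_id = "my-org/llama-2-7b-aqlm-2bit-1x16"  # hypothetical AQLM-prequantized repo
    tokenizer = AutoTokenizer.from_pretrained(model_id)
    model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto")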
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/quantizers/quantizer_awq.py b/llmeval-env/lib/python3.10/site-packages/transformers/quantizers/quantizer_awq.py
new file mode 100644
index 0000000000000000000000000000000000000000..5e66f9baf1c0a76fd44e56ca9bdfb7d6fddc236e
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/quantizers/quantizer_awq.py
@@ -0,0 +1,124 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import importlib.metadata
+from typing import TYPE_CHECKING
+
+from packaging import version
+
+from .base import HfQuantizer
+
+
+if TYPE_CHECKING:
+ from ..modeling_utils import PreTrainedModel
+
+from ..utils import is_accelerate_available, is_auto_awq_available, is_torch_available, logging
+from ..utils.quantization_config import AWQLinearVersion
+
+
+if is_torch_available():
+ import torch
+
+logger = logging.get_logger(__name__)
+
+
+class AwqQuantizer(HfQuantizer):
+ """
+ 4-bit quantization for Activation-aware Weight Quantization (AWQ): https://arxiv.org/abs/2306.00978
+ """
+
+ # AWQ requires data calibration - we only support inference
+ requires_calibration = True
+
+ required_packages = ["awq", "accelerate"]
+
+ def __init__(self, quantization_config, **kwargs):
+ super().__init__(quantization_config, **kwargs)
+
+ def validate_environment(self, device_map, **kwargs):
+ if not torch.cuda.is_available():
+ raise RuntimeError("GPU is required to run AWQ quantized model.")
+
+ if not is_auto_awq_available():
+ raise ImportError("Loading an AWQ quantized model requires auto-awq library (`pip install autoawq`)")
+
+ if not is_accelerate_available():
+ raise ImportError("Loading an AWQ quantized model requires accelerate (`pip install accelerate`)")
+
+ if device_map is None:
+ logger.warning_once(
+ "You have loaded an AWQ model on CPU while a CUDA device is available; make sure to move "
+ "your model to a GPU device in order to run it."
+ )
+ elif device_map is not None:
+ if isinstance(device_map, dict) and ("cpu" in device_map.values() or "disk" in device_map.values()):
+ raise ValueError(
+ "You are attempting to load an AWQ model with a device_map that contains a CPU or disk device."
+ " This is not supported. Please remove the CPU or disk device from the device_map."
+ )
+
+ def update_torch_dtype(self, torch_dtype):
+ if torch_dtype is None:
+ torch_dtype = torch.float16
+ elif torch_dtype != torch.float16:
+ logger.warning("We suggest you set `torch_dtype=torch.float16` for better efficiency with AWQ.")
+ return torch_dtype
+
+ def _process_model_before_weight_loading(self, model: "PreTrainedModel", **kwargs):
+ from ..integrations import get_keys_to_not_convert, replace_with_awq_linear
+
+ self.modules_to_not_convert = get_keys_to_not_convert(model)
+
+ if self.quantization_config.modules_to_not_convert is not None:
+ self.modules_to_not_convert.extend(self.quantization_config.modules_to_not_convert)
+
+ model, has_been_replaced = replace_with_awq_linear(
+ model, quantization_config=self.quantization_config, modules_to_not_convert=self.modules_to_not_convert
+ )
+
+ if not has_been_replaced:
+ logger.warning(
+ "You are loading an AWQ model but no linear modules were found in your model."
+ " Please double check your model architecture, or submit an issue on github if you think this is a bug."
+ )
+
+ def _process_model_after_weight_loading(self, model):
+ if self.quantization_config.do_fuse:
+ from ..integrations import fuse_awq_modules
+
+ model = fuse_awq_modules(model, self.quantization_config)
+ model._awq_is_fused = True # TODO: consider storing this flag in model.config instead
+
+ if self.quantization_config.version == AWQLinearVersion.EXLLAMA:
+ from ..integrations import post_init_awq_exllama_modules
+
+ model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config)
+
+ @property
+ def is_serializable(self):
+ # AWQ through auto-awq has always been serializable, except if the model is fused.
+ if self.quantization_config.do_fuse:
+ logger.warning("You cannot save an AWQ model that uses fused modules!")
+ return False
+
+ if self.quantization_config.version == AWQLinearVersion.EXLLAMA:
+ logger.warning("You cannot save an AWQ model that uses Exllama backend!")
+ return False
+
+ return True
+
+ @property
+ def is_trainable(self):
+ # AWQ supports PEFT fine-tuning from version 0.2.0
+ MIN_AWQ_VERSION_FOR_PEFT = "0.2.0"
+ return version.parse(importlib.metadata.version("autoawq")) >= version.parse(MIN_AWQ_VERSION_FOR_PEFT)
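
AWQ likewise only loads pre-quantized checkpoints (`requires_calibration = True`); a minimal sketch, assuming a hypothetical AWQ repository id, an installed `autoawq` package, and a CUDA device:

    import torch
    from transformers import AutoModelForCausalLM, AutoTokenizer

    model_id = "my-org/mistral-7b-instruct-awq"  # hypothetical AWQ-prequantized repo
    tokenizer = AutoTokenizer.from_pretrained(model_id)
    model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.float16, device_map="cuda:0")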
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/quantizers/quantizer_bnb_4bit.py b/llmeval-env/lib/python3.10/site-packages/transformers/quantizers/quantizer_bnb_4bit.py
new file mode 100644
index 0000000000000000000000000000000000000000..112cfd644f15703c4018fa4384ac3fdaf1c1d97a
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/quantizers/quantizer_bnb_4bit.py
@@ -0,0 +1,317 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import importlib
+from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union
+
+from packaging import version
+
+from .base import HfQuantizer
+from .quantizers_utils import get_module_from_name
+
+
+if TYPE_CHECKING:
+ from ..modeling_utils import PreTrainedModel
+
+from ..utils import is_accelerate_available, is_bitsandbytes_available, is_torch_available, logging
+
+
+if is_torch_available():
+ import torch
+
+ from ..pytorch_utils import Conv1D
+
+logger = logging.get_logger(__name__)
+
+
+class Bnb4BitHfQuantizer(HfQuantizer):
+ """
+ 4-bit quantization from the bitsandbytes quantization method:
+ before loading: converts transformer layers into Linear4bit
+ during loading: loads the 16-bit weights and passes them to the layer object
+ after loading: quantizes the individual weights in Linear4bit into 4-bit at the first .cuda() call
+ saving: from the state dict, as usual; saves the weights and the `quant_state` components
+ loading: needs to locate the `quant_state` components and pass them to the Params4bit constructor
+ """
+
+ use_keep_in_fp32_modules = True
+ requires_parameters_quantization = True
+ requires_calibration = False
+
+ required_packages = ["bitsandbytes", "accelerate"]
+
+ def __init__(self, quantization_config, **kwargs):
+ super().__init__(quantization_config, **kwargs)
+
+ if self.quantization_config.llm_int8_skip_modules is not None:
+ self.modules_to_not_convert = self.quantization_config.llm_int8_skip_modules
+
+ def validate_environment(self, *args, **kwargs):
+ if not (is_accelerate_available() and is_bitsandbytes_available()):
+ raise ImportError(
+ "Using `bitsandbytes` 8-bit quantization requires Accelerate: `pip install accelerate` "
+ "and the latest version of bitsandbytes: `pip install -i https://pypi.org/simple/ bitsandbytes`"
+ )
+
+ if kwargs.get("from_tf", False) or kwargs.get("from_flax", False):
+ raise ValueError(
+ "Converting into 4-bit or 8-bit weights from tf/flax weights is currently not supported, please make"
+ " sure the weights are in PyTorch format."
+ )
+
+ if not torch.cuda.is_available():
+ raise RuntimeError("No GPU found. A GPU is needed for quantization.")
+
+ device_map = kwargs.get("device_map", None)
+ if (
+ device_map is not None
+ and isinstance(device_map, dict)
+ and not self.quantization_config.llm_int8_enable_fp32_cpu_offload
+ ):
+ device_map_without_lm_head = {
+ key: device_map[key] for key in device_map.keys() if key not in self.modules_to_not_convert
+ }
+ if "cpu" in device_map_without_lm_head.values() or "disk" in device_map_without_lm_head.values():
+ raise ValueError(
+ """
+ Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit the
+ quantized model. If you want to dispatch the model on the CPU or the disk while keeping these modules
+ in 32-bit, you need to set `llm_int8_enable_fp32_cpu_offload=True` and pass a custom `device_map` to
+ `from_pretrained`. Check
+ https://huggingface.co/docs/transformers/main/en/main_classes/quantization#offload-between-cpu-and-gpu
+ for more details.
+ """
+ )
+
+ if version.parse(importlib.metadata.version("bitsandbytes")) < version.parse("0.39.0"):
+ raise ValueError(
+ "You have a version of `bitsandbytes` that is not compatible with 4-bit inference and training."
+ " Make sure you have the latest version of `bitsandbytes` installed."
+ )
+
+ def adjust_target_dtype(self, target_dtype: "torch.dtype") -> "torch.dtype":
+ if version.parse(importlib.metadata.version("accelerate")) > version.parse("0.19.0"):
+ from accelerate.utils import CustomDtype
+
+ if target_dtype != torch.int8:
+ logger.info(f"target_dtype {target_dtype} is replaced by `CustomDtype.INT4` for 4-bit BnB quantization")
+ return CustomDtype.INT4
+ else:
+ raise ValueError(
+ "You are using `device_map='auto'` on a 4-bit loaded version of the model. To automatically compute"
+ " the appropriate device map, you should upgrade your `accelerate` library with "
+ "`pip install --upgrade accelerate` or install it from source to support fp4 auto device map "
+ "calculation. You may encounter unexpected behavior, or pass your own device map."
+ )
+
+ def check_quantized_param(
+ self,
+ model: "PreTrainedModel",
+ param_value: "torch.Tensor",
+ param_name: str,
+ state_dict: Dict[str, Any],
+ **kwargs,
+ ) -> bool:
+ import bitsandbytes as bnb
+
+ module, tensor_name = get_module_from_name(model, param_name)
+ if isinstance(module._parameters.get(tensor_name, None), bnb.nn.Params4bit):
+ # Add here check for loaded components' dtypes once serialization is implemented
+ return True
+ elif isinstance(module, bnb.nn.Linear4bit) and tensor_name == "bias":
+ # bias could be loaded by regular set_module_tensor_to_device() from accelerate,
+ # but it would wrongly use uninitialized weight there.
+ return True
+ else:
+ return False
+
+ def create_quantized_param(
+ self,
+ model: "PreTrainedModel",
+ param_value: "torch.Tensor",
+ param_name: str,
+ target_device: "torch.device",
+ state_dict: Dict[str, Any],
+ unexpected_keys: Optional[List[str]] = None,
+ ):
+ """
+ combines logic from _load_state_dict_into_meta_model and .integrations.bitsandbytes.py::set_module_quantized_tensor_to_device()
+ """
+ import bitsandbytes as bnb
+
+ module, tensor_name = get_module_from_name(model, param_name)
+
+ if tensor_name not in module._parameters:
+ raise ValueError(f"{module} does not have a parameter or a buffer named {tensor_name}.")
+
+ old_value = getattr(module, tensor_name)
+
+ if tensor_name == "bias":
+ if param_value is None:
+ new_value = old_value.to(target_device)
+ else:
+ new_value = param_value.to(target_device)
+
+ new_value = torch.nn.Parameter(new_value, requires_grad=old_value.requires_grad)
+ module._parameters[tensor_name] = new_value
+ return
+
+ if not isinstance(module._parameters[tensor_name], bnb.nn.Params4bit):
+ raise ValueError("this function only loads `Linear4bit` components")
+ if (
+ old_value.device == torch.device("meta")
+ and target_device not in ["meta", torch.device("meta")]
+ and param_value is None
+ ):
+ raise ValueError(f"{tensor_name} is on the meta device, we need a `value` to put on {target_device}.")
+
+ # construct `new_value` for the module._parameters[tensor_name]:
+ if self.pre_quantized:
+ # 4bit loading. Collecting components for restoring quantized weight
+ # This can be expanded to make a universal call for any quantized weight loading
+
+ if not self.is_serializable:
+ raise ValueError(
+ "Detected int4 weights but the version of bitsandbytes is not compatible with int4 serialization. "
+ "Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`."
+ )
+
+ if (param_name + ".quant_state.bitsandbytes__fp4" not in state_dict) and (
+ param_name + ".quant_state.bitsandbytes__nf4" not in state_dict
+ ):
+ raise ValueError(
+ f"Supplied state dict for {param_name} does not contain `bitsandbytes__*` and possibly other `quantized_stats` components."
+ )
+
+ quantized_stats = {}
+ for k, v in state_dict.items():
+ if param_name + "." in k:
+ quantized_stats[k] = v
+ if unexpected_keys is not None and k in unexpected_keys:
+ unexpected_keys.remove(k)
+
+ new_value = bnb.nn.Params4bit.from_prequantized(
+ data=param_value,
+ quantized_stats=quantized_stats,
+ requires_grad=False,
+ device=target_device,
+ )
+ else:
+ new_value = param_value.to("cpu")
+
+ # Support models using `Conv1D` in place of `nn.Linear` (e.g. openai-community/gpt2) by transposing the weight matrix prior to quantization.
+ # Since weights are saved in the correct "orientation", we skip transposing when loading.
+ if issubclass(module.source_cls, Conv1D):
+ new_value = new_value.T
+
+ kwargs = old_value.__dict__
+ new_value = bnb.nn.Params4bit(new_value, requires_grad=False, **kwargs).to(target_device)
+
+ module._parameters[tensor_name] = new_value
+
+ # Copied from transformers.quantizers.quantizer_bnb_8bit.Bnb8BitHfQuantizer.adjust_max_memory
+ def adjust_max_memory(self, max_memory: Dict[str, Union[int, str]]) -> Dict[str, Union[int, str]]:
+ # need more space for buffers that are created during quantization
+ max_memory = {key: val * 0.90 for key, val in max_memory.items()}
+ return max_memory
+
+ # Copied from transformers.quantizers.quantizer_bnb_8bit.Bnb8BitHfQuantizer.update_torch_dtype
+ def update_torch_dtype(self, torch_dtype: "torch.dtype") -> "torch.dtype":
+ if torch_dtype is None:
+ # We force the `dtype` to be float16, this is a requirement from `bitsandbytes`
+ logger.info(
+ "Overriding torch_dtype=%s with `torch_dtype=torch.float16` due to "
+ "requirements of `bitsandbytes` to enable model loading in 8-bit or 4-bit. "
+ "Pass your own torch_dtype to specify the dtype of the remaining non-linear layers or pass"
+ " torch_dtype=torch.float16 to remove this warning.",
+ torch_dtype,
+ )
+ torch_dtype = torch.float16
+ return torch_dtype
+
+ # Copied from transformers.quantizers.quantizer_bnb_8bit.Bnb8BitHfQuantizer.update_device_map
+ def update_device_map(self, device_map):
+ if device_map is None:
+ device_map = {"": torch.cuda.current_device()}
+ logger.info(
+ "The device_map was not initialized. "
+ "Setting device_map to {'':torch.cuda.current_device()}. "
+ "If you want to use the model for inference, please set device_map ='auto' "
+ )
+ return device_map
+
+ # Copied from transformers.quantizers.quantizer_bnb_8bit.Bnb8BitHfQuantizer._process_model_before_weight_loading
+ def _process_model_before_weight_loading(
+ self,
+ model: "PreTrainedModel",
+ device_map,
+ keep_in_fp32_modules: List[str] = [],
+ **kwargs,
+ ):
+ from ..integrations import get_keys_to_not_convert, replace_with_bnb_linear
+
+ load_in_8bit_fp32_cpu_offload = self.quantization_config.llm_int8_enable_fp32_cpu_offload
+
+ # We keep some modules such as the lm_head in their original dtype for numerical stability reasons
+ if self.quantization_config.llm_int8_skip_modules is None:
+ self.modules_to_not_convert = get_keys_to_not_convert(model)
+ else:
+ self.modules_to_not_convert = self.quantization_config.llm_int8_skip_modules
+
+ if not isinstance(self.modules_to_not_convert, list):
+ self.modules_to_not_convert = [self.modules_to_not_convert]
+
+ self.modules_to_not_convert.extend(keep_in_fp32_modules)
+
+ # Extend `self.modules_to_not_convert` to keys that are supposed to be offloaded to `cpu` or `disk`
+ if isinstance(device_map, dict) and len(device_map.keys()) > 1:
+ keys_on_cpu = [key for key, value in device_map.items() if value in ["disk", "cpu"]]
+
+ if len(keys_on_cpu) > 0 and not load_in_8bit_fp32_cpu_offload:
+ raise ValueError(
+ "If you want to offload some keys to `cpu` or `disk`, you need to set "
+ "`llm_int8_enable_fp32_cpu_offload=True`. Note that these modules will not be "
+ " converted to 8-bit but kept in 32-bit."
+ )
+ self.modules_to_not_convert.extend(keys_on_cpu)
+
+ model = replace_with_bnb_linear(
+ model, modules_to_not_convert=self.modules_to_not_convert, quantization_config=self.quantization_config
+ )
+ # TODO: consider bringing replace_with_bnb_linear() code from ..integrations/bitsandbytes.py to here
+
+ model.config.quantization_config = self.quantization_config
+
+ # Copied from transformers.quantizers.quantizer_bnb_8bit.Bnb8BitHfQuantizer._process_model_after_weight_loading with 8bit->4bit
+ def _process_model_after_weight_loading(self, model: "PreTrainedModel", **kwargs):
+ model.is_loaded_in_4bit = True
+ model.is_4bit_serializable = self.is_serializable
+ return model
+
+ @property
+ def is_serializable(self):
+ _is_4bit_serializable = version.parse(importlib.metadata.version("bitsandbytes")) >= version.parse("0.41.3")
+
+ if not _is_4bit_serializable:
+ logger.warning(
+ "You are calling `save_pretrained` to a 4-bit converted model, but your `bitsandbytes` version doesn't support it. "
+ "If you want to save 4-bit models, make sure to have `bitsandbytes>=0.41.3` installed."
+ )
+ return False
+
+ return True
+
+ @property
+ def is_trainable(self) -> bool:
+ return True
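
Unlike AQLM/AWQ, bitsandbytes 4-bit quantization happens on the fly at load time (`requires_calibration = False`); a minimal sketch, assuming `bitsandbytes>=0.39.0`, `accelerate`, and a CUDA device (the model id is just an example):

    import torch
    from transformers import AutoModelForCausalLM, BitsAndBytesConfig

    quant_config = BitsAndBytesConfig(
        load_in_4bit=True,
        bnb_4bit_quant_type="nf4",
        bnb_4bit_compute_dtype=torch.bfloat16,
    )
    model = AutoModelForCausalLM.from_pretrained(
        "facebook/opt-350m", quantization_config=quant_config, device_map="auto"
    )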
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/quantizers/quantizer_bnb_8bit.py b/llmeval-env/lib/python3.10/site-packages/transformers/quantizers/quantizer_bnb_8bit.py
new file mode 100644
index 0000000000000000000000000000000000000000..b80e9bd3a1dfa27694bab0132c9b9306ee3f0e1f
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/quantizers/quantizer_bnb_8bit.py
@@ -0,0 +1,285 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import importlib
+from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union
+
+from packaging import version
+
+from .base import HfQuantizer
+
+
+if TYPE_CHECKING:
+ from ..modeling_utils import PreTrainedModel
+
+from ..utils import is_accelerate_available, is_bitsandbytes_available, is_torch_available, logging
+from .quantizers_utils import get_module_from_name
+
+
+if is_torch_available():
+ import torch
+
+ from ..pytorch_utils import Conv1D
+
+logger = logging.get_logger(__name__)
+
+
+class Bnb8BitHfQuantizer(HfQuantizer):
+ """
+ 8-bit quantization from the bitsandbytes quantization method:
+ before loading: converts transformer layers into Linear8bitLt
+ during loading: loads the 16-bit weights and passes them to the layer object
+ after loading: quantizes the individual weights in Linear8bitLt into 8-bit at the first .cuda() call
+ saving: from the state dict, as usual; saves the weights and the 'SCB' component
+ loading: needs to locate the 'SCB' component and pass it to the Linear8bitLt object
+ """
+
+ use_keep_in_fp32_modules = True
+ requires_parameters_quantization = True
+ requires_calibration = False
+
+ required_packages = ["bitsandbytes", "accelerate"]
+
+ def __init__(self, quantization_config, **kwargs):
+ super().__init__(quantization_config, **kwargs)
+
+ if self.quantization_config.llm_int8_skip_modules is not None:
+ self.modules_to_not_convert = self.quantization_config.llm_int8_skip_modules
+
+ def validate_environment(self, *args, **kwargs):
+ if not (is_accelerate_available() and is_bitsandbytes_available()):
+ raise ImportError(
+ "Using `bitsandbytes` 8-bit quantization requires Accelerate: `pip install accelerate` "
+ "and the latest version of bitsandbytes: `pip install -i https://pypi.org/simple/ bitsandbytes`"
+ )
+
+ if kwargs.get("from_tf", False) or kwargs.get("from_flax", False):
+ raise ValueError(
+ "Converting into 4-bit or 8-bit weights from tf/flax weights is currently not supported, please make"
+ " sure the weights are in PyTorch format."
+ )
+
+ if not torch.cuda.is_available():
+ raise RuntimeError("No GPU found. A GPU is needed for quantization.")
+
+ device_map = kwargs.get("device_map", None)
+ if (
+ device_map is not None
+ and isinstance(device_map, dict)
+ and not self.quantization_config.llm_int8_enable_fp32_cpu_offload
+ ):
+ device_map_without_lm_head = {
+ key: device_map[key] for key in device_map.keys() if key not in self.modules_to_not_convert
+ }
+ if "cpu" in device_map_without_lm_head.values() or "disk" in device_map_without_lm_head.values():
+ raise ValueError(
+ """
+ Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit the
+ quantized model. If you want to dispatch the model on the CPU or the disk while keeping these modules
+ in 32-bit, you need to set `llm_int8_enable_fp32_cpu_offload=True` and pass a custom `device_map` to
+ `from_pretrained`. Check
+ https://huggingface.co/docs/transformers/main/en/main_classes/quantization#offload-between-cpu-and-gpu
+ for more details.
+ """
+ )
+
+ if version.parse(importlib.metadata.version("bitsandbytes")) < version.parse("0.37.2"):
+ raise ValueError(
+ "You have a version of `bitsandbytes` that is not compatible with 8-bit inference and training."
+ " Make sure you have the latest version of `bitsandbytes` installed."
+ )
+
+ def adjust_max_memory(self, max_memory: Dict[str, Union[int, str]]) -> Dict[str, Union[int, str]]:
+ # need more space for buffers that are created during quantization
+ max_memory = {key: val * 0.90 for key, val in max_memory.items()}
+ return max_memory
+
+ def update_torch_dtype(self, torch_dtype: "torch.dtype") -> "torch.dtype":
+ if torch_dtype is None:
+ # We force the `dtype` to be float16, this is a requirement from `bitsandbytes`
+ logger.info(
+ "Overriding torch_dtype=%s with `torch_dtype=torch.float16` due to "
+ "requirements of `bitsandbytes` to enable model loading in 8-bit or 4-bit. "
+ "Pass your own torch_dtype to specify the dtype of the remaining non-linear layers or pass"
+ " torch_dtype=torch.float16 to remove this warning.",
+ torch_dtype,
+ )
+ torch_dtype = torch.float16
+ return torch_dtype
+
+ def update_device_map(self, device_map):
+ if device_map is None:
+ device_map = {"": torch.cuda.current_device()}
+ logger.info(
+ "The device_map was not initialized. "
+ "Setting device_map to {'':torch.cuda.current_device()}. "
+ "If you want to use the model for inference, please set device_map ='auto' "
+ )
+ return device_map
+
+ def adjust_target_dtype(self, target_dtype: "torch.dtype") -> "torch.dtype":
+ if target_dtype != torch.int8:
+ logger.info(f"target_dtype {target_dtype} is replaced by `torch.int8` for 8-bit BnB quantization")
+ return torch.int8
+
+ def check_quantized_param(
+ self,
+ model: "PreTrainedModel",
+ param_value: "torch.Tensor",
+ param_name: str,
+ state_dict: Dict[str, Any],
+ **kwargs,
+ ):
+ import bitsandbytes as bnb
+
+ module, tensor_name = get_module_from_name(model, param_name)
+ if isinstance(module._parameters.get(tensor_name, None), bnb.nn.Int8Params):
+ if self.pre_quantized:
+ if param_name.replace("weight", "SCB") not in state_dict.keys():
+ raise ValueError("Missing quantization component `SCB`")
+ if param_value.dtype != torch.int8:
+ raise ValueError(
+ f"Incompatible dtype `{param_value.dtype}` when loading 8-bit prequantized weight. Expected `torch.int8`."
+ )
+ return True
+ return False
+
+ def create_quantized_param(
+ self,
+ model: "PreTrainedModel",
+ param_value: "torch.Tensor",
+ param_name: str,
+ target_device: "torch.device",
+ state_dict: Dict[str, Any],
+ unexpected_keys: Optional[List[str]] = None,
+ ):
+ """
+ combines logic from _load_state_dict_into_meta_model and .integrations.bitsandbytes.py::set_module_quantized_tensor_to_device()
+ needs auxiliary items from the state dict; if found, they are removed from `unexpected_keys`
+ """
+ import bitsandbytes as bnb
+
+ fp16_statistics_key = param_name.replace("weight", "SCB")
+ fp16_weights_format_key = param_name.replace("weight", "weight_format")
+
+ fp16_statistics = state_dict.get(fp16_statistics_key, None)
+ fp16_weights_format = state_dict.get(fp16_weights_format_key, None)
+
+ module, tensor_name = get_module_from_name(model, param_name)
+ if tensor_name not in module._parameters:
+ raise ValueError(f"{module} does not have a parameter or a buffer named {tensor_name}.")
+
+ old_value = getattr(module, tensor_name)
+
+ if not isinstance(module._parameters[tensor_name], bnb.nn.Int8Params):
+ raise ValueError(f"Parameter `{tensor_name}` should only be a `bnb.nn.Int8Params` instance.")
+ if (
+ old_value.device == torch.device("meta")
+ and target_device not in ["meta", torch.device("meta")]
+ and param_value is None
+ ):
+ raise ValueError(f"{tensor_name} is on the meta device, we need a `value` to put on {target_device}.")
+
+ new_value = param_value.to("cpu")
+ if self.pre_quantized and not self.is_serializable:
+ raise ValueError(
+ "Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. "
+ "Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`."
+ )
+
+ # Support models using `Conv1D` in place of `nn.Linear` (e.g. openai-community/gpt2) by transposing the weight matrix prior to quantization.
+ # Since weights are saved in the correct "orientation", we skip transposing when loading.
+ if issubclass(module.source_cls, Conv1D):
+ if fp16_statistics is None:
+ new_value = new_value.T
+
+ kwargs = old_value.__dict__
+ new_value = bnb.nn.Int8Params(new_value, requires_grad=False, **kwargs).to(target_device)
+
+ module._parameters[tensor_name] = new_value
+ if fp16_statistics is not None:
+ setattr(module.weight, "SCB", fp16_statistics.to(target_device))
+ if unexpected_keys is not None:
+ unexpected_keys.remove(fp16_statistics_key)
+
+ # We just need to pop the `weight_format` keys from the state dict to remove unneeded
+ # We just need to pop the `weight_format` keys from the state dict to avoid unneeded
+ # warning messages. The correct format is retrieved during the first forward pass.
+ unexpected_keys.remove(fp16_weights_format_key)
+
+ def _process_model_after_weight_loading(self, model: "PreTrainedModel", **kwargs):
+ model.is_loaded_in_8bit = True
+ model.is_8bit_serializable = self.is_serializable
+ return model
+
+ def _process_model_before_weight_loading(
+ self,
+ model: "PreTrainedModel",
+ device_map,
+ keep_in_fp32_modules: List[str] = [],
+ **kwargs,
+ ):
+ from ..integrations import get_keys_to_not_convert, replace_with_bnb_linear
+
+ load_in_8bit_fp32_cpu_offload = self.quantization_config.llm_int8_enable_fp32_cpu_offload
+
+ # We keep some modules such as the lm_head in their original dtype for numerical stability reasons
+ if self.quantization_config.llm_int8_skip_modules is None:
+ self.modules_to_not_convert = get_keys_to_not_convert(model)
+ else:
+ self.modules_to_not_convert = self.quantization_config.llm_int8_skip_modules
+
+ if not isinstance(self.modules_to_not_convert, list):
+ self.modules_to_not_convert = [self.modules_to_not_convert]
+
+ self.modules_to_not_convert.extend(keep_in_fp32_modules)
+
+ # Extend `self.modules_to_not_convert` to keys that are supposed to be offloaded to `cpu` or `disk`
+ if isinstance(device_map, dict) and len(device_map.keys()) > 1:
+ keys_on_cpu = [key for key, value in device_map.items() if value in ["disk", "cpu"]]
+
+ if len(keys_on_cpu) > 0 and not load_in_8bit_fp32_cpu_offload:
+ raise ValueError(
+ "If you want to offload some keys to `cpu` or `disk`, you need to set "
+ "`llm_int8_enable_fp32_cpu_offload=True`. Note that these modules will not be "
+                    "converted to 8-bit but kept in 32-bit."
+ )
+ self.modules_to_not_convert.extend(keys_on_cpu)
+
+ model = replace_with_bnb_linear(
+ model, modules_to_not_convert=self.modules_to_not_convert, quantization_config=self.quantization_config
+ )
+        # TODO: consider bringing replace_with_bnb_linear() code from ..integrations/bitsandbytes.py to here
+
+ model.config.quantization_config = self.quantization_config
+
+ @property
+ def is_serializable(self):
+ _bnb_supports_8bit_serialization = version.parse(importlib.metadata.version("bitsandbytes")) > version.parse(
+ "0.37.2"
+ )
+
+ if not _bnb_supports_8bit_serialization:
+ logger.warning(
+                "You are calling `save_pretrained` to an 8-bit converted model, but your `bitsandbytes` version doesn't support it. "
+ "If you want to save 8-bit models, make sure to have `bitsandbytes>0.37.2` installed. You will most likely face errors or"
+ " unexpected behaviours."
+ )
+ return False
+
+ return True
+
+ @property
+ def is_trainable(self) -> bool:
+ return version.parse(importlib.metadata.version("bitsandbytes")) >= version.parse("0.37.0")
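+
+
+# Usage sketch (illustrative only; the checkpoint name is an arbitrary example): this quantizer is
+# not instantiated directly -- it is dispatched by `from_pretrained` when an 8-bit
+# `BitsAndBytesConfig` is passed, e.g.
+#
+#   from transformers import AutoModelForCausalLM, BitsAndBytesConfig
+#
+#   config = BitsAndBytesConfig(load_in_8bit=True)
+#   model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m", quantization_config=config)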
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/quantizers/quantizer_gptq.py b/llmeval-env/lib/python3.10/site-packages/transformers/quantizers/quantizer_gptq.py
new file mode 100644
index 0000000000000000000000000000000000000000..ffc6f2090a8a7b3d6ac896655fda4f6c0632dea7
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/quantizers/quantizer_gptq.py
@@ -0,0 +1,94 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import importlib
+from typing import TYPE_CHECKING, Optional
+
+from packaging import version
+
+from .base import HfQuantizer
+
+
+if TYPE_CHECKING:
+ from ..modeling_utils import PreTrainedModel
+
+from ..utils import is_auto_gptq_available, is_optimum_available, is_torch_available, logging
+from ..utils.quantization_config import GPTQConfig, QuantizationConfigMixin
+
+
+if is_torch_available():
+ import torch
+
+logger = logging.get_logger(__name__)
+
+
+class GptqHfQuantizer(HfQuantizer):
+ """
+    Quantizer of the GPTQ method - for GPTQ, the quantizer supports calibration of the model through
+    the `auto_gptq` package. Quantization is done under the hood for users if they load a non-prequantized model.
+ """
+
+ requires_calibration = False
+ required_packages = ["optimum", "auto_gptq"]
+ optimum_quantizer = None
+
+ def __init__(self, quantization_config: QuantizationConfigMixin, **kwargs):
+ super().__init__(quantization_config, **kwargs)
+ from optimum.gptq import GPTQQuantizer
+
+ self.optimum_quantizer = GPTQQuantizer.from_dict(self.quantization_config.to_dict_optimum())
+
+ def validate_environment(self, *args, **kwargs):
+ gptq_supports_cpu = version.parse(importlib.metadata.version("auto-gptq")) > version.parse("0.4.2")
+ if not gptq_supports_cpu and not torch.cuda.is_available():
+            raise RuntimeError("A GPU is required to quantize or run a quantized model.")
+ elif not (is_optimum_available() and is_auto_gptq_available()):
+ raise ImportError(
+ "Loading a GPTQ quantized model requires optimum (`pip install optimum`) and auto-gptq library (`pip install auto-gptq`)"
+ )
+ elif version.parse(importlib.metadata.version("auto_gptq")) < version.parse("0.4.2"):
+ raise ImportError(
+ "You need a version of auto_gptq >= 0.4.2 to use GPTQ: `pip install --upgrade auto-gptq`"
+ )
+
+ def update_torch_dtype(self, torch_dtype: "torch.dtype") -> "torch.dtype":
+ if torch_dtype is None:
+ torch_dtype = torch.float16
+ elif torch_dtype != torch.float16:
+            logger.info("We suggest setting `torch_dtype=torch.float16` for better efficiency with GPTQ.")
+ return torch_dtype
+
+ def _process_model_before_weight_loading(self, model: "PreTrainedModel", **kwargs):
+ if model.__class__.main_input_name != "input_ids":
+            raise RuntimeError("We can only quantize pure text models.")
+
+ if self.pre_quantized:
+ model = self.optimum_quantizer.convert_model(model)
+
+ def _process_model_after_weight_loading(self, model: "PreTrainedModel", **kwargs):
+ if self.pre_quantized:
+ model = self.optimum_quantizer.post_init_model(model)
+ else:
+ if self.quantization_config.tokenizer is None:
+ self.quantization_config.tokenizer = model.name_or_path
+
+ self.optimum_quantizer.quantize_model(model, self.quantization_config.tokenizer)
+ model.config.quantization_config = GPTQConfig.from_dict(self.optimum_quantizer.to_dict())
+
+ @property
+ def is_trainable(self, model: Optional["PreTrainedModel"] = None):
+ return True
+
+ @property
+ def is_serializable(self):
+ return True
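+
+
+# Usage sketch (illustrative only; the checkpoint name and calibration dataset are arbitrary
+# examples): a non-prequantized model can be quantized on the fly by passing a `GPTQConfig` to
+# `from_pretrained`, which dispatches to this quantizer, e.g.
+#
+#   from transformers import AutoModelForCausalLM, AutoTokenizer, GPTQConfig
+#
+#   tokenizer = AutoTokenizer.from_pretrained("facebook/opt-125m")
+#   config = GPTQConfig(bits=4, dataset="c4", tokenizer=tokenizer)
+#   model = AutoModelForCausalLM.from_pretrained("facebook/opt-125m", quantization_config=config)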
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/quantizers/quantizer_quanto.py b/llmeval-env/lib/python3.10/site-packages/transformers/quantizers/quantizer_quanto.py
new file mode 100644
index 0000000000000000000000000000000000000000..e7e2219ab6621715e3275312d547ec2bfbd4b676
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/quantizers/quantizer_quanto.py
@@ -0,0 +1,200 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import importlib
+from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union
+
+from packaging import version
+
+from .base import HfQuantizer
+from .quantizers_utils import get_module_from_name
+
+
+if TYPE_CHECKING:
+ from ..modeling_utils import PreTrainedModel
+
+from ..utils import is_accelerate_available, is_quanto_available, is_torch_available, logging
+from ..utils.quantization_config import QuantoConfig
+
+
+if is_torch_available():
+ import torch
+
+logger = logging.get_logger(__name__)
+
+
+class QuantoHfQuantizer(HfQuantizer):
+ """
+ Quantizer for the quanto library
+ """
+
+ required_packages = ["quanto", "accelerate"]
+ requires_parameters_quantization = True
+ requires_calibration = False
+
+ def __init__(self, quantization_config: QuantoConfig, **kwargs):
+ super().__init__(quantization_config, **kwargs)
+ self.post_init()
+
+ def post_init(self):
+ r"""
+ Safety checker
+ """
+ if self.quantization_config.activations is not None and not self.pre_quantized:
+ raise ValueError(
+                "We don't support quantizing the activations with the transformers library. "
+                "Use the quanto library for more complex use cases such as activation quantization, calibration and quantization-aware training."
+ )
+
+ def validate_environment(self, *args, **kwargs):
+ if not is_quanto_available():
+ raise ImportError("Loading a quanto quantized model requires quanto library (`pip install quanto`)")
+ if not is_accelerate_available():
+ raise ImportError(
+ "Loading a quanto quantized model requires accelerate library (`pip install accelerate`)"
+ )
+
+ def update_device_map(self, device_map):
+ if device_map is None:
+ device_map = {"": "cpu"}
+ logger.info(
+ "The device_map was not initialized. "
+ "Setting device_map to {'':'cpu'}. "
+                "If you want to use the model for inference, please set device_map='auto'."
+ )
+ return device_map
+
+ def update_torch_dtype(self, torch_dtype: "torch.dtype") -> "torch.dtype":
+ if torch_dtype is None:
+ logger.info("You did not specify `torch_dtype` in `from_pretrained`. Setting it to `torch.float32`.")
+ torch_dtype = torch.float32
+ return torch_dtype
+
+ def update_missing_keys(self, model, missing_keys: List[str], prefix: str) -> List[str]:
+ import quanto
+
+ not_missing_keys = []
+ for name, module in model.named_modules():
+ if isinstance(module, quanto.QModuleMixin):
+ for missing in missing_keys:
+ if (
+ (name in missing or name in f"{prefix}.{missing}")
+ and not missing.endswith(".weight")
+ and not missing.endswith(".bias")
+ ):
+ not_missing_keys.append(missing)
+ return [k for k in missing_keys if k not in not_missing_keys]
+
+ def check_quantized_param(
+ self,
+ model: "PreTrainedModel",
+ param_value: "torch.Tensor",
+ param_name: str,
+ state_dict: Dict[str, Any],
+ **kwargs,
+ ) -> bool:
+ """
+ Check if a parameter needs to be quantized.
+ """
+ import quanto
+
+ device_map = kwargs.get("device_map", None)
+ param_device = kwargs.get("param_device", None)
+ # we don't quantize the model if the module is going to be offloaded to the cpu
+ if device_map is not None and param_device is not None:
+ device_map_values = set(device_map.values())
+ if param_device == "cpu" and len(device_map_values) > 1:
+ if not (device_map_values == {"cpu"} or device_map_values == {"cpu", "disk"}):
+ return False
+
+ module, tensor_name = get_module_from_name(model, param_name)
+ # We only quantize the weights and the bias is not quantized.
+ if isinstance(module, quanto.QModuleMixin) and "weight" in tensor_name:
+            # If the weights are already quantized, there is no need to recreate them with `create_quantized_param`.
+ return not module.frozen
+ else:
+ return False
+
+ def adjust_max_memory(self, max_memory: Dict[str, Union[int, str]]) -> Dict[str, Union[int, str]]:
+ max_memory = {key: val * 0.90 for key, val in max_memory.items()}
+ return max_memory
+
+ def create_quantized_param(
+ self,
+ model: "PreTrainedModel",
+ param_value: "torch.Tensor",
+ param_name: str,
+ target_device: "torch.device",
+ *args,
+ **kwargs,
+ ):
+ """
+        Create the quantized parameter by calling .freeze() after setting it on the module.
+ """
+ from accelerate.utils import set_module_tensor_to_device
+
+ set_module_tensor_to_device(model, param_name, target_device, param_value)
+ module, _ = get_module_from_name(model, param_name)
+ module.freeze()
+ module.weight.requires_grad = False
+
+ def adjust_target_dtype(self, target_dtype: "torch.dtype") -> "torch.dtype":
+ if version.parse(importlib.metadata.version("accelerate")) > version.parse("0.27.0"):
+ from accelerate.utils import CustomDtype
+
+ mapping = {
+ "int8": torch.int8,
+ "float8": CustomDtype.FP8,
+ "int4": CustomDtype.INT4,
+ "int2": CustomDtype.INT2,
+ }
+ target_dtype = mapping[self.quantization_config.weights]
+ return target_dtype
+ else:
+ raise ValueError(
+ "You are using `device_map='auto'` on a quanto quantized model. To automatically compute"
+                " the appropriate device map, you should upgrade your `accelerate` library with "
+                "`pip install --upgrade accelerate` or install it from source."
+ )
+
+ def _process_model_before_weight_loading(
+ self, model: "PreTrainedModel", keep_in_fp32_modules: List[str] = [], **kwargs
+ ):
+ from ..integrations import get_keys_to_not_convert, replace_with_quanto_layers
+
+ # We keep some modules such as the lm_head in their original dtype for numerical stability reasons
+ if self.quantization_config.modules_to_not_convert is None:
+ self.modules_to_not_convert = get_keys_to_not_convert(model)
+ else:
+ self.modules_to_not_convert = self.quantization_config.modules_to_not_convert
+
+ if not isinstance(self.modules_to_not_convert, list):
+ self.modules_to_not_convert = [self.modules_to_not_convert]
+
+ self.modules_to_not_convert.extend(keep_in_fp32_modules)
+
+ model, _ = replace_with_quanto_layers(
+ model, modules_to_not_convert=self.modules_to_not_convert, quantization_config=self.quantization_config
+ )
+ model.config.quantization_config = self.quantization_config
+
+ def _process_model_after_weight_loading(self, model):
+ return model
+
+ @property
+ def is_trainable(self, model: Optional["PreTrainedModel"] = None):
+ return False
+
+ @property
+ def is_serializable(self):
+ return False
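+
+
+# Usage sketch (illustrative only; the checkpoint name is an arbitrary example): weight-only quanto
+# quantization is requested through a `QuantoConfig` passed to `from_pretrained`, which dispatches
+# to this quantizer, e.g.
+#
+#   from transformers import AutoModelForCausalLM, QuantoConfig
+#
+#   config = QuantoConfig(weights="int8")
+#   model = AutoModelForCausalLM.from_pretrained("facebook/opt-125m", quantization_config=config)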
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/quantizers/quantizers_utils.py b/llmeval-env/lib/python3.10/site-packages/transformers/quantizers/quantizers_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..6ae287bf251b51337b8588b2e0176178316e7e96
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/quantizers/quantizers_utils.py
@@ -0,0 +1,26 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import Any, Tuple
+
+
+def get_module_from_name(module, tensor_name: str) -> Tuple[Any, str]:
+ if "." in tensor_name:
+ splits = tensor_name.split(".")
+ for split in splits[:-1]:
+ new_module = getattr(module, split)
+ if new_module is None:
+ raise ValueError(f"{module} has no attribute {split}.")
+ module = new_module
+ tensor_name = splits[-1]
+ return module, tensor_name
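+
+
+# Example (sketch; the dotted path is only illustrative and depends on the model architecture):
+#
+#   module, tensor_name = get_module_from_name(model, "model.layers.0.self_attn.q_proj.weight")
+#   # -> (the `q_proj` submodule, "weight")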
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/tools/__init__.py b/llmeval-env/lib/python3.10/site-packages/transformers/tools/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..68d66eb275e0b6fef2db1cdda810fe11e360aba9
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/tools/__init__.py
@@ -0,0 +1,71 @@
+#!/usr/bin/env python
+# coding=utf-8
+
+# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+from ..utils import (
+ OptionalDependencyNotAvailable,
+ _LazyModule,
+ is_torch_available,
+)
+
+
+_import_structure = {
+ "agents": ["Agent", "AzureOpenAiAgent", "HfAgent", "LocalAgent", "OpenAiAgent"],
+ "base": ["PipelineTool", "RemoteTool", "Tool", "launch_gradio_demo", "load_tool"],
+}
+
+try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["document_question_answering"] = ["DocumentQuestionAnsweringTool"]
+ _import_structure["image_captioning"] = ["ImageCaptioningTool"]
+ _import_structure["image_question_answering"] = ["ImageQuestionAnsweringTool"]
+ _import_structure["image_segmentation"] = ["ImageSegmentationTool"]
+ _import_structure["speech_to_text"] = ["SpeechToTextTool"]
+ _import_structure["text_classification"] = ["TextClassificationTool"]
+ _import_structure["text_question_answering"] = ["TextQuestionAnsweringTool"]
+ _import_structure["text_summarization"] = ["TextSummarizationTool"]
+ _import_structure["text_to_speech"] = ["TextToSpeechTool"]
+ _import_structure["translation"] = ["TranslationTool"]
+
+if TYPE_CHECKING:
+ from .agents import Agent, AzureOpenAiAgent, HfAgent, LocalAgent, OpenAiAgent
+ from .base import PipelineTool, RemoteTool, Tool, launch_gradio_demo, load_tool
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .document_question_answering import DocumentQuestionAnsweringTool
+ from .image_captioning import ImageCaptioningTool
+ from .image_question_answering import ImageQuestionAnsweringTool
+ from .image_segmentation import ImageSegmentationTool
+ from .speech_to_text import SpeechToTextTool
+ from .text_classification import TextClassificationTool
+ from .text_question_answering import TextQuestionAnsweringTool
+ from .text_summarization import TextSummarizationTool
+ from .text_to_speech import TextToSpeechTool
+ from .translation import TranslationTool
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
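+
+# Note (sketch): thanks to the `_LazyModule` indirection above, concrete tool modules are only
+# imported on first access, e.g.
+#
+#   from transformers.tools import load_tool   # no heavy tool module is imported yet
+#   tool = load_tool("translation")            # the translation tool module is imported here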
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/tools/__pycache__/base.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/tools/__pycache__/base.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..cfa7e3daf46117a9e5096dba4ca6af3a4b5a6dc5
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/tools/__pycache__/base.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/tools/__pycache__/document_question_answering.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/tools/__pycache__/document_question_answering.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5b3b8b12ca63dba26fc910b5f368486034210e36
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/tools/__pycache__/document_question_answering.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/tools/__pycache__/image_question_answering.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/tools/__pycache__/image_question_answering.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..43a88cd4e1141d309814058713d2bf6822622cda
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/tools/__pycache__/image_question_answering.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/tools/agent_types.py b/llmeval-env/lib/python3.10/site-packages/transformers/tools/agent_types.py
new file mode 100644
index 0000000000000000000000000000000000000000..f1c3261d57cacc0d0299467f0fa566340e4b5a94
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/tools/agent_types.py
@@ -0,0 +1,277 @@
+# coding=utf-8
+# Copyright 2023 HuggingFace Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import os
+import pathlib
+import tempfile
+import uuid
+
+import numpy as np
+
+from ..utils import is_soundfile_availble, is_torch_available, is_vision_available, logging
+
+
+logger = logging.get_logger(__name__)
+
+if is_vision_available():
+ import PIL.Image
+ from PIL import Image
+ from PIL.Image import Image as ImageType
+else:
+ ImageType = object
+
+if is_torch_available():
+ import torch
+
+if is_soundfile_availble():
+ import soundfile as sf
+
+
+class AgentType:
+ """
+ Abstract class to be reimplemented to define types that can be returned by agents.
+
+ These objects serve three purposes:
+
+    - They behave as if they were the type they're meant to be, e.g., a string for text, a PIL.Image for images
+    - They can be stringified: calling str(object) returns a string defining the object
+ - They should be displayed correctly in ipython notebooks/colab/jupyter
+ """
+
+ def __init__(self, value):
+ self._value = value
+
+ def __str__(self):
+ return self.to_string()
+
+ def to_raw(self):
+ logger.error(
+ "This is a raw AgentType of unknown type. Display in notebooks and string conversion will be unreliable"
+ )
+ return self._value
+
+ def to_string(self) -> str:
+ logger.error(
+ "This is a raw AgentType of unknown type. Display in notebooks and string conversion will be unreliable"
+ )
+ return str(self._value)
+
+
+class AgentText(AgentType, str):
+ """
+ Text type returned by the agent. Behaves as a string.
+ """
+
+ def to_raw(self):
+ return self._value
+
+ def to_string(self):
+ return self._value
+
+
+class AgentImage(AgentType, ImageType):
+ """
+ Image type returned by the agent. Behaves as a PIL.Image.
+ """
+
+ def __init__(self, value):
+ super().__init__(value)
+
+ if not is_vision_available():
+ raise ImportError("PIL must be installed in order to handle images.")
+
+ self._path = None
+ self._raw = None
+ self._tensor = None
+
+ if isinstance(value, ImageType):
+ self._raw = value
+ elif isinstance(value, (str, pathlib.Path)):
+ self._path = value
+ elif isinstance(value, torch.Tensor):
+ self._tensor = value
+ else:
+ raise ValueError(f"Unsupported type for {self.__class__.__name__}: {type(value)}")
+
+ def _ipython_display_(self, include=None, exclude=None):
+ """
+        Displays this type correctly in an ipython notebook (ipython, colab, jupyter, ...)
+ """
+ from IPython.display import Image, display
+
+ display(Image(self.to_string()))
+
+ def to_raw(self):
+ """
+ Returns the "raw" version of that object. In the case of an AgentImage, it is a PIL.Image.
+ """
+ if self._raw is not None:
+ return self._raw
+
+ if self._path is not None:
+ self._raw = Image.open(self._path)
+ return self._raw
+
+ def to_string(self):
+ """
+ Returns the stringified version of that object. In the case of an AgentImage, it is a path to the serialized
+ version of the image.
+ """
+ if self._path is not None:
+ return self._path
+
+ if self._raw is not None:
+ directory = tempfile.mkdtemp()
+ self._path = os.path.join(directory, str(uuid.uuid4()) + ".png")
+ self._raw.save(self._path)
+
+ return self._path
+
+ if self._tensor is not None:
+ array = self._tensor.cpu().detach().numpy()
+
+            # There is likely a simpler way than loading the tensor into a PIL image just to save it.
+ img = Image.fromarray((array * 255).astype(np.uint8))
+
+ directory = tempfile.mkdtemp()
+ self._path = os.path.join(directory, str(uuid.uuid4()) + ".png")
+
+ img.save(self._path)
+
+ return self._path
+
+
+class AgentAudio(AgentType):
+ """
+ Audio type returned by the agent.
+ """
+
+ def __init__(self, value, samplerate=16_000):
+ super().__init__(value)
+
+ if not is_soundfile_availble():
+ raise ImportError("soundfile must be installed in order to handle audio.")
+
+ self._path = None
+ self._tensor = None
+
+ self.samplerate = samplerate
+
+ if isinstance(value, (str, pathlib.Path)):
+ self._path = value
+ elif isinstance(value, torch.Tensor):
+ self._tensor = value
+ else:
+ raise ValueError(f"Unsupported audio type: {type(value)}")
+
+ def _ipython_display_(self, include=None, exclude=None):
+ """
+        Displays this type correctly in an ipython notebook (ipython, colab, jupyter, ...)
+ """
+ from IPython.display import Audio, display
+
+ display(Audio(self.to_string(), rate=self.samplerate))
+
+ def to_raw(self):
+ """
+ Returns the "raw" version of that object. It is a `torch.Tensor` object.
+ """
+ if self._tensor is not None:
+ return self._tensor
+
+ if self._path is not None:
+ tensor, self.samplerate = sf.read(self._path)
+ self._tensor = torch.tensor(tensor)
+ return self._tensor
+
+ def to_string(self):
+ """
+ Returns the stringified version of that object. In the case of an AgentAudio, it is a path to the serialized
+ version of the audio.
+ """
+ if self._path is not None:
+ return self._path
+
+ if self._tensor is not None:
+ directory = tempfile.mkdtemp()
+ self._path = os.path.join(directory, str(uuid.uuid4()) + ".wav")
+ sf.write(self._path, self._tensor, samplerate=self.samplerate)
+ return self._path
+
+
+AGENT_TYPE_MAPPING = {"text": AgentText, "image": AgentImage, "audio": AgentAudio}
+INSTANCE_TYPE_MAPPING = {str: AgentText}
+
+if is_vision_available():
+    INSTANCE_TYPE_MAPPING[PIL.Image.Image] = AgentImage  # the key must be the image class, not the PIL.Image module
+
+
+def handle_agent_inputs(*args, **kwargs):
+ args = [(arg.to_raw() if isinstance(arg, AgentType) else arg) for arg in args]
+ kwargs = {k: (v.to_raw() if isinstance(v, AgentType) else v) for k, v in kwargs.items()}
+ return args, kwargs
+
+
+def handle_agent_outputs(outputs, output_types=None):
+ if isinstance(outputs, dict):
+ decoded_outputs = {}
+ for i, (k, v) in enumerate(outputs.items()):
+ if output_types is not None:
+ # If the class has defined outputs, we can map directly according to the class definition
+ if output_types[i] in AGENT_TYPE_MAPPING:
+ decoded_outputs[k] = AGENT_TYPE_MAPPING[output_types[i]](v)
+ else:
+ decoded_outputs[k] = AgentType(v)
+
+ else:
+ # If the class does not have defined output, then we map according to the type
+ for _k, _v in INSTANCE_TYPE_MAPPING.items():
+ if isinstance(v, _k):
+ decoded_outputs[k] = _v(v)
+ if k not in decoded_outputs:
+                    decoded_outputs[k] = AgentType(v)
+
+ elif isinstance(outputs, (list, tuple)):
+ decoded_outputs = type(outputs)()
+ for i, v in enumerate(outputs):
+ if output_types is not None:
+ # If the class has defined outputs, we can map directly according to the class definition
+ if output_types[i] in AGENT_TYPE_MAPPING:
+ decoded_outputs.append(AGENT_TYPE_MAPPING[output_types[i]](v))
+ else:
+ decoded_outputs.append(AgentType(v))
+ else:
+ # If the class does not have defined output, then we map according to the type
+ found = False
+ for _k, _v in INSTANCE_TYPE_MAPPING.items():
+ if isinstance(v, _k):
+ decoded_outputs.append(_v(v))
+ found = True
+
+ if not found:
+ decoded_outputs.append(AgentType(v))
+
+ else:
+ if output_types[0] in AGENT_TYPE_MAPPING:
+ # If the class has defined outputs, we can map directly according to the class definition
+ decoded_outputs = AGENT_TYPE_MAPPING[output_types[0]](outputs)
+
+ else:
+ # If the class does not have defined output, then we map according to the type
+ for _k, _v in INSTANCE_TYPE_MAPPING.items():
+ if isinstance(outputs, _k):
+ return _v(outputs)
+ return AgentType(outputs)
+
+ return decoded_outputs
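+
+
+# Example (sketch): agent outputs are wrapped so that they behave like their native type while
+# remaining serializable to a string, e.g.
+#
+#   text = AgentText("hello")
+#   isinstance(text, str)   # True -- usable anywhere a plain str is expected
+#   text.to_string()        # "hello"
+#
+# AgentImage accepts a PIL image, a path or a torch.Tensor; its to_string() returns the path to a
+# serialized .png (written to a temporary directory on first call).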
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/tools/base.py b/llmeval-env/lib/python3.10/site-packages/transformers/tools/base.py
new file mode 100644
index 0000000000000000000000000000000000000000..2a7d05a0322b7d0d6798d4541acea35e5ad82377
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/tools/base.py
@@ -0,0 +1,765 @@
+#!/usr/bin/env python
+# coding=utf-8
+
+# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import base64
+import importlib
+import inspect
+import io
+import json
+import os
+import tempfile
+from typing import Any, Dict, List, Optional, Union
+
+from huggingface_hub import create_repo, hf_hub_download, metadata_update, upload_folder
+from huggingface_hub.utils import RepositoryNotFoundError, build_hf_headers, get_session
+
+from ..dynamic_module_utils import custom_object_save, get_class_from_dynamic_module, get_imports
+from ..image_utils import is_pil_image
+from ..models.auto import AutoProcessor
+from ..utils import (
+ CONFIG_NAME,
+ cached_file,
+ is_accelerate_available,
+ is_torch_available,
+ is_vision_available,
+ logging,
+)
+from .agent_types import handle_agent_inputs, handle_agent_outputs
+
+
+logger = logging.get_logger(__name__)
+
+if is_torch_available():
+ import torch
+
+if is_accelerate_available():
+ from accelerate import PartialState
+ from accelerate.utils import send_to_device
+
+
+TOOL_CONFIG_FILE = "tool_config.json"
+
+
+def get_repo_type(repo_id, repo_type=None, **hub_kwargs):
+ if repo_type is not None:
+ return repo_type
+ try:
+ hf_hub_download(repo_id, TOOL_CONFIG_FILE, repo_type="space", **hub_kwargs)
+ return "space"
+ except RepositoryNotFoundError:
+ try:
+ hf_hub_download(repo_id, TOOL_CONFIG_FILE, repo_type="model", **hub_kwargs)
+ return "model"
+ except RepositoryNotFoundError:
+ raise EnvironmentError(f"`{repo_id}` does not seem to be a valid repo identifier on the Hub.")
+ except Exception:
+ return "model"
+ except Exception:
+ return "space"
+
+
+# docstyle-ignore
+APP_FILE_TEMPLATE = """from transformers import launch_gradio_demo
+from {module_name} import {class_name}
+
+launch_gradio_demo({class_name})
+"""
+
+
+class Tool:
+ """
+ A base class for the functions used by the agent. Subclass this and implement the `__call__` method as well as the
+ following class attributes:
+
+ - **description** (`str`) -- A short description of what your tool does, the inputs it expects and the output(s) it
+ will return. For instance 'This is a tool that downloads a file from a `url`. It takes the `url` as input, and
+ returns the text contained in the file'.
+ - **name** (`str`) -- A performative name that will be used for your tool in the prompt to the agent. For instance
+ `"text-classifier"` or `"image_generator"`.
+    - **inputs** (`List[str]`) -- The list of modalities expected for the inputs (in the same order as in the call).
+      Modalities should be `"text"`, `"image"` or `"audio"`. This is only used by `launch_gradio_demo` or to make a
+      nice space from your tool.
+    - **outputs** (`List[str]`) -- The list of modalities returned by the tool (in the same order as the return of the
+      call method). Modalities should be `"text"`, `"image"` or `"audio"`. This is only used by `launch_gradio_demo`
+      or to make a nice space from your tool.
+
+    You can also override the method [`~Tool.setup`] if your tool has an expensive operation to perform before being
+ usable (such as loading a model). [`~Tool.setup`] will be called the first time you use your tool, but not at
+ instantiation.
+ """
+
+ description: str = "This is a tool that ..."
+ name: str = ""
+
+ inputs: List[str]
+ outputs: List[str]
+
+ def __init__(self, *args, **kwargs):
+ self.is_initialized = False
+
+ def __call__(self, *args, **kwargs):
+        raise NotImplementedError("Write this method in your subclass of `Tool`.")
+
+ def setup(self):
+ """
+ Overwrite this method here for any operation that is expensive and needs to be executed before you start using
+ your tool. Such as loading a big model.
+ """
+ self.is_initialized = True
+
+ def save(self, output_dir):
+ """
+ Saves the relevant code files for your tool so it can be pushed to the Hub. This will copy the code of your
+ tool in `output_dir` as well as autogenerate:
+
+ - a config file named `tool_config.json`
+ - an `app.py` file so that your tool can be converted to a space
+        - a `requirements.txt` containing the names of the modules used by your tool (as detected when inspecting its
+ code)
+
+ You should only use this method to save tools that are defined in a separate module (not `__main__`).
+
+ Args:
+ output_dir (`str`): The folder in which you want to save your tool.
+ """
+ os.makedirs(output_dir, exist_ok=True)
+ # Save module file
+ if self.__module__ == "__main__":
+ raise ValueError(
+ f"We can't save the code defining {self} in {output_dir} as it's been defined in __main__. You "
+ "have to put this code in a separate module so we can include it in the saved folder."
+ )
+ module_files = custom_object_save(self, output_dir)
+
+ module_name = self.__class__.__module__
+ last_module = module_name.split(".")[-1]
+ full_name = f"{last_module}.{self.__class__.__name__}"
+
+ # Save config file
+ config_file = os.path.join(output_dir, "tool_config.json")
+ if os.path.isfile(config_file):
+ with open(config_file, "r", encoding="utf-8") as f:
+ tool_config = json.load(f)
+ else:
+ tool_config = {}
+
+ tool_config = {"tool_class": full_name, "description": self.description, "name": self.name}
+ with open(config_file, "w", encoding="utf-8") as f:
+ f.write(json.dumps(tool_config, indent=2, sort_keys=True) + "\n")
+
+ # Save app file
+ app_file = os.path.join(output_dir, "app.py")
+ with open(app_file, "w", encoding="utf-8") as f:
+ f.write(APP_FILE_TEMPLATE.format(module_name=last_module, class_name=self.__class__.__name__))
+
+ # Save requirements file
+ requirements_file = os.path.join(output_dir, "requirements.txt")
+ imports = []
+ for module in module_files:
+ imports.extend(get_imports(module))
+ imports = list(set(imports))
+ with open(requirements_file, "w", encoding="utf-8") as f:
+ f.write("\n".join(imports) + "\n")
+
+ @classmethod
+ def from_hub(
+ cls,
+ repo_id: str,
+ model_repo_id: Optional[str] = None,
+ token: Optional[str] = None,
+ remote: bool = False,
+ **kwargs,
+ ):
+ """
+ Loads a tool defined on the Hub.
+
+
+
+ Loading a tool from the Hub means that you'll download the tool and execute it locally.
+ ALWAYS inspect the tool you're downloading before loading it within your runtime, as you would do when
+ installing a package using pip/npm/apt.
+
+
+
+ Args:
+ repo_id (`str`):
+ The name of the repo on the Hub where your tool is defined.
+ model_repo_id (`str`, *optional*):
+ If your tool uses a model and you want to use a different model than the default, you can pass a second
+ repo ID or an endpoint url to this argument.
+ token (`str`, *optional*):
+ The token to identify you on hf.co. If unset, will use the token generated when running
+ `huggingface-cli login` (stored in `~/.huggingface`).
+ remote (`bool`, *optional*, defaults to `False`):
+ Whether to use your tool by downloading the model or (if it is available) with an inference endpoint.
+ kwargs (additional keyword arguments, *optional*):
+ Additional keyword arguments that will be split in two: all arguments relevant to the Hub (such as
+ `cache_dir`, `revision`, `subfolder`) will be used when downloading the files for your tool, and the
+ others will be passed along to its init.
+ """
+ if remote and model_repo_id is None:
+ endpoints = get_default_endpoints()
+ if repo_id not in endpoints:
+ raise ValueError(
+ f"Could not infer a default endpoint for {repo_id}, you need to pass one using the "
+ "`model_repo_id` argument."
+ )
+ model_repo_id = endpoints[repo_id]
+ hub_kwargs_names = [
+ "cache_dir",
+ "force_download",
+ "resume_download",
+ "proxies",
+ "revision",
+ "repo_type",
+ "subfolder",
+ "local_files_only",
+ ]
+ hub_kwargs = {k: v for k, v in kwargs.items() if k in hub_kwargs_names}
+
+ # Try to get the tool config first.
+ hub_kwargs["repo_type"] = get_repo_type(repo_id, **hub_kwargs)
+ resolved_config_file = cached_file(
+ repo_id,
+ TOOL_CONFIG_FILE,
+ token=token,
+ **hub_kwargs,
+ _raise_exceptions_for_gated_repo=False,
+ _raise_exceptions_for_missing_entries=False,
+ _raise_exceptions_for_connection_errors=False,
+ )
+ is_tool_config = resolved_config_file is not None
+ if resolved_config_file is None:
+ resolved_config_file = cached_file(
+ repo_id,
+ CONFIG_NAME,
+ token=token,
+ **hub_kwargs,
+ _raise_exceptions_for_gated_repo=False,
+ _raise_exceptions_for_missing_entries=False,
+ _raise_exceptions_for_connection_errors=False,
+ )
+ if resolved_config_file is None:
+ raise EnvironmentError(
+ f"{repo_id} does not appear to provide a valid configuration in `tool_config.json` or `config.json`."
+ )
+
+ with open(resolved_config_file, encoding="utf-8") as reader:
+ config = json.load(reader)
+
+ if not is_tool_config:
+ if "custom_tool" not in config:
+ raise EnvironmentError(
+ f"{repo_id} does not provide a mapping to custom tools in its configuration `config.json`."
+ )
+ custom_tool = config["custom_tool"]
+ else:
+ custom_tool = config
+
+ tool_class = custom_tool["tool_class"]
+ tool_class = get_class_from_dynamic_module(tool_class, repo_id, token=token, **hub_kwargs)
+
+ if len(tool_class.name) == 0:
+ tool_class.name = custom_tool["name"]
+ if tool_class.name != custom_tool["name"]:
+ logger.warning(
+ f"{tool_class.__name__} implements a different name in its configuration and class. Using the tool "
+ "configuration name."
+ )
+ tool_class.name = custom_tool["name"]
+
+ if len(tool_class.description) == 0:
+ tool_class.description = custom_tool["description"]
+ if tool_class.description != custom_tool["description"]:
+ logger.warning(
+ f"{tool_class.__name__} implements a different description in its configuration and class. Using the "
+ "tool configuration description."
+ )
+ tool_class.description = custom_tool["description"]
+
+ if remote:
+ return RemoteTool(model_repo_id, token=token, tool_class=tool_class)
+ return tool_class(model_repo_id, token=token, **kwargs)
+
+ def push_to_hub(
+ self,
+ repo_id: str,
+ commit_message: str = "Upload tool",
+ private: Optional[bool] = None,
+ token: Optional[Union[bool, str]] = None,
+ create_pr: bool = False,
+ ) -> str:
+ """
+ Upload the tool to the Hub.
+
+ Parameters:
+ repo_id (`str`):
+ The name of the repository you want to push your tool to. It should contain your organization name when
+ pushing to a given organization.
+ commit_message (`str`, *optional*, defaults to `"Upload tool"`):
+ Message to commit while pushing.
+ private (`bool`, *optional*):
+ Whether or not the repository created should be private.
+ token (`bool` or `str`, *optional*):
+ The token to use as HTTP bearer authorization for remote files. If unset, will use the token generated
+ when running `huggingface-cli login` (stored in `~/.huggingface`).
+ create_pr (`bool`, *optional*, defaults to `False`):
+ Whether or not to create a PR with the uploaded files or directly commit.
+ """
+ repo_url = create_repo(
+ repo_id=repo_id, token=token, private=private, exist_ok=True, repo_type="space", space_sdk="gradio"
+ )
+ repo_id = repo_url.repo_id
+ metadata_update(repo_id, {"tags": ["tool"]}, repo_type="space")
+
+ with tempfile.TemporaryDirectory() as work_dir:
+ # Save all files.
+ self.save(work_dir)
+ logger.info(f"Uploading the following files to {repo_id}: {','.join(os.listdir(work_dir))}")
+ return upload_folder(
+ repo_id=repo_id,
+ commit_message=commit_message,
+ folder_path=work_dir,
+ token=token,
+ create_pr=create_pr,
+ repo_type="space",
+ )
+
+ @staticmethod
+ def from_gradio(gradio_tool):
+ """
+ Creates a [`Tool`] from a gradio tool.
+ """
+
+ class GradioToolWrapper(Tool):
+ def __init__(self, _gradio_tool):
+ super().__init__()
+ self.name = _gradio_tool.name
+ self.description = _gradio_tool.description
+
+ GradioToolWrapper.__call__ = gradio_tool.run
+ return GradioToolWrapper(gradio_tool)
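+
+# Minimal subclass sketch (illustrative only; the name, description and logic are made up):
+#
+#   class TextReverserTool(Tool):
+#       name = "text_reverser"
+#       description = "This is a tool that reverses a piece of text. It takes the text as input and returns it reversed."
+#       inputs = ["text"]
+#       outputs = ["text"]
+#
+#       def __call__(self, text: str):
+#           return text[::-1]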
+
+
+class RemoteTool(Tool):
+ """
+ A [`Tool`] that will make requests to an inference endpoint.
+
+ Args:
+ endpoint_url (`str`, *optional*):
+ The url of the endpoint to use.
+ token (`str`, *optional*):
+ The token to use as HTTP bearer authorization for remote files. If unset, will use the token generated when
+ running `huggingface-cli login` (stored in `~/.huggingface`).
+ tool_class (`type`, *optional*):
+ The corresponding `tool_class` if this is a remote version of an existing tool. Will help determine when
+ the output should be converted to another type (like images).
+ """
+
+ def __init__(self, endpoint_url=None, token=None, tool_class=None):
+ self.endpoint_url = endpoint_url
+ self.client = EndpointClient(endpoint_url, token=token)
+ self.tool_class = tool_class
+
+ def prepare_inputs(self, *args, **kwargs):
+ """
+ Prepare the inputs received for the HTTP client sending data to the endpoint. Positional arguments will be
+        matched with the signature of the `tool_class` if it was provided at instantiation. Images will be encoded into
+ bytes.
+
+ You can override this method in your custom class of [`RemoteTool`].
+ """
+ inputs = kwargs.copy()
+ if len(args) > 0:
+ if self.tool_class is not None:
+ # Match args with the signature
+ if issubclass(self.tool_class, PipelineTool):
+ call_method = self.tool_class.encode
+ else:
+ call_method = self.tool_class.__call__
+ signature = inspect.signature(call_method).parameters
+ parameters = [
+ k
+ for k, p in signature.items()
+ if p.kind not in [inspect._ParameterKind.VAR_POSITIONAL, inspect._ParameterKind.VAR_KEYWORD]
+ ]
+ if parameters[0] == "self":
+ parameters = parameters[1:]
+ if len(args) > len(parameters):
+ raise ValueError(
+ f"{self.tool_class} only accepts {len(parameters)} arguments but {len(args)} were given."
+ )
+ for arg, name in zip(args, parameters):
+ inputs[name] = arg
+ elif len(args) > 1:
+ raise ValueError("A `RemoteTool` can only accept one positional input.")
+ elif len(args) == 1:
+ if is_pil_image(args[0]):
+ return {"inputs": self.client.encode_image(args[0])}
+ return {"inputs": args[0]}
+
+ for key, value in inputs.items():
+ if is_pil_image(value):
+ inputs[key] = self.client.encode_image(value)
+
+ return {"inputs": inputs}
+
+ def extract_outputs(self, outputs):
+ """
+ You can override this method in your custom class of [`RemoteTool`] to apply some custom post-processing of the
+ outputs of the endpoint.
+ """
+ return outputs
+
+ def __call__(self, *args, **kwargs):
+ args, kwargs = handle_agent_inputs(*args, **kwargs)
+
+ output_image = self.tool_class is not None and self.tool_class.outputs == ["image"]
+ inputs = self.prepare_inputs(*args, **kwargs)
+ if isinstance(inputs, dict):
+ outputs = self.client(**inputs, output_image=output_image)
+ else:
+ outputs = self.client(inputs, output_image=output_image)
+ if isinstance(outputs, list) and len(outputs) == 1 and isinstance(outputs[0], list):
+ outputs = outputs[0]
+
+ outputs = handle_agent_outputs(outputs, self.tool_class.outputs if self.tool_class is not None else None)
+
+ return self.extract_outputs(outputs)
+
+
+class PipelineTool(Tool):
+ """
+ A [`Tool`] tailored towards Transformer models. On top of the class attributes of the base class [`Tool`], you will
+ need to specify:
+
+ - **model_class** (`type`) -- The class to use to load the model in this tool.
+ - **default_checkpoint** (`str`) -- The default checkpoint that should be used when the user doesn't specify one.
+ - **pre_processor_class** (`type`, *optional*, defaults to [`AutoProcessor`]) -- The class to use to load the
+ pre-processor
+ - **post_processor_class** (`type`, *optional*, defaults to [`AutoProcessor`]) -- The class to use to load the
+ post-processor (when different from the pre-processor).
+
+ Args:
+ model (`str` or [`PreTrainedModel`], *optional*):
+ The name of the checkpoint to use for the model, or the instantiated model. If unset, will default to the
+ value of the class attribute `default_checkpoint`.
+ pre_processor (`str` or `Any`, *optional*):
+ The name of the checkpoint to use for the pre-processor, or the instantiated pre-processor (can be a
+ tokenizer, an image processor, a feature extractor or a processor). Will default to the value of `model` if
+ unset.
+ post_processor (`str` or `Any`, *optional*):
+            The name of the checkpoint to use for the post-processor, or the instantiated post-processor (can be a
+ tokenizer, an image processor, a feature extractor or a processor). Will default to the `pre_processor` if
+ unset.
+ device (`int`, `str` or `torch.device`, *optional*):
+ The device on which to execute the model. Will default to any accelerator available (GPU, MPS etc...), the
+ CPU otherwise.
+ device_map (`str` or `dict`, *optional*):
+ If passed along, will be used to instantiate the model.
+ model_kwargs (`dict`, *optional*):
+ Any keyword argument to send to the model instantiation.
+ token (`str`, *optional*):
+ The token to use as HTTP bearer authorization for remote files. If unset, will use the token generated when
+ running `huggingface-cli login` (stored in `~/.huggingface`).
+ hub_kwargs (additional keyword arguments, *optional*):
+ Any additional keyword argument to send to the methods that will load the data from the Hub.
+ """
+
+ pre_processor_class = AutoProcessor
+ model_class = None
+ post_processor_class = AutoProcessor
+ default_checkpoint = None
+
+ def __init__(
+ self,
+ model=None,
+ pre_processor=None,
+ post_processor=None,
+ device=None,
+ device_map=None,
+ model_kwargs=None,
+ token=None,
+ **hub_kwargs,
+ ):
+ if not is_torch_available():
+ raise ImportError("Please install torch in order to use this tool.")
+
+ if not is_accelerate_available():
+ raise ImportError("Please install accelerate in order to use this tool.")
+
+ if model is None:
+ if self.default_checkpoint is None:
+ raise ValueError("This tool does not implement a default checkpoint, you need to pass one.")
+ model = self.default_checkpoint
+ if pre_processor is None:
+ pre_processor = model
+
+ self.model = model
+ self.pre_processor = pre_processor
+ self.post_processor = post_processor
+ self.device = device
+ self.device_map = device_map
+ self.model_kwargs = {} if model_kwargs is None else model_kwargs
+ if device_map is not None:
+ self.model_kwargs["device_map"] = device_map
+ self.hub_kwargs = hub_kwargs
+ self.hub_kwargs["token"] = token
+
+ super().__init__()
+
+ def setup(self):
+ """
+ Instantiates the `pre_processor`, `model` and `post_processor` if necessary.
+ """
+ if isinstance(self.pre_processor, str):
+ self.pre_processor = self.pre_processor_class.from_pretrained(self.pre_processor, **self.hub_kwargs)
+
+ if isinstance(self.model, str):
+ self.model = self.model_class.from_pretrained(self.model, **self.model_kwargs, **self.hub_kwargs)
+
+ if self.post_processor is None:
+ self.post_processor = self.pre_processor
+ elif isinstance(self.post_processor, str):
+ self.post_processor = self.post_processor_class.from_pretrained(self.post_processor, **self.hub_kwargs)
+
+ if self.device is None:
+ if self.device_map is not None:
+ self.device = list(self.model.hf_device_map.values())[0]
+ else:
+ self.device = PartialState().default_device
+
+ if self.device_map is None:
+ self.model.to(self.device)
+
+ super().setup()
+
+ def encode(self, raw_inputs):
+ """
+ Uses the `pre_processor` to prepare the inputs for the `model`.
+ """
+ return self.pre_processor(raw_inputs)
+
+ def forward(self, inputs):
+ """
+ Sends the inputs through the `model`.
+ """
+ with torch.no_grad():
+ return self.model(**inputs)
+
+ def decode(self, outputs):
+ """
+ Uses the `post_processor` to decode the model output.
+ """
+ return self.post_processor(outputs)
+
+ def __call__(self, *args, **kwargs):
+ args, kwargs = handle_agent_inputs(*args, **kwargs)
+
+ if not self.is_initialized:
+ self.setup()
+
+ encoded_inputs = self.encode(*args, **kwargs)
+ encoded_inputs = send_to_device(encoded_inputs, self.device)
+ outputs = self.forward(encoded_inputs)
+ outputs = send_to_device(outputs, "cpu")
+ decoded_outputs = self.decode(outputs)
+
+ return handle_agent_outputs(decoded_outputs, self.outputs)
+
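+# Call-path sketch (illustrative): for any concrete `PipelineTool` instance `tool`,
+#
+#   out = tool(raw_inputs)
+#
+# lazily runs `setup()` on first use and is then roughly equivalent to
+#
+#   out = tool.decode(tool.forward(tool.encode(raw_inputs)))
+#
+# with the encoded inputs moved to the tool's device before `forward` and the outputs moved back to
+# the CPU before `decode`.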
+
+def launch_gradio_demo(tool_class: Tool):
+ """
+ Launches a gradio demo for a tool. The corresponding tool class needs to properly implement the class attributes
+ `inputs` and `outputs`.
+
+ Args:
+ tool_class (`type`): The class of the tool for which to launch the demo.
+ """
+ try:
+ import gradio as gr
+ except ImportError:
+ raise ImportError("Gradio should be installed in order to launch a gradio demo.")
+
+ tool = tool_class()
+
+ def fn(*args, **kwargs):
+ return tool(*args, **kwargs)
+
+ gr.Interface(
+ fn=fn,
+ inputs=tool_class.inputs,
+ outputs=tool_class.outputs,
+ title=tool_class.__name__,
+ article=tool.description,
+ ).launch()
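+
+# Example (sketch, assuming `gradio` is installed): `launch_gradio_demo(TranslationTool)` builds a
+# small gradio interface whose input and output widgets are derived from the tool's
+# `inputs`/`outputs` class attributes.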
+
+
+TASK_MAPPING = {
+ "document-question-answering": "DocumentQuestionAnsweringTool",
+ "image-captioning": "ImageCaptioningTool",
+ "image-question-answering": "ImageQuestionAnsweringTool",
+ "image-segmentation": "ImageSegmentationTool",
+ "speech-to-text": "SpeechToTextTool",
+ "summarization": "TextSummarizationTool",
+ "text-classification": "TextClassificationTool",
+ "text-question-answering": "TextQuestionAnsweringTool",
+ "text-to-speech": "TextToSpeechTool",
+ "translation": "TranslationTool",
+}
+
+
+def get_default_endpoints():
+ endpoints_file = cached_file("huggingface-tools/default-endpoints", "default_endpoints.json", repo_type="dataset")
+ with open(endpoints_file, "r", encoding="utf-8") as f:
+ endpoints = json.load(f)
+ return endpoints
+
+
+def supports_remote(task_or_repo_id):
+ endpoints = get_default_endpoints()
+ return task_or_repo_id in endpoints
+
+
+def load_tool(task_or_repo_id, model_repo_id=None, remote=False, token=None, **kwargs):
+ """
+ Main function to quickly load a tool, be it on the Hub or in the Transformers library.
+
+
+
+ Loading a tool means that you'll download the tool and execute it locally.
+ ALWAYS inspect the tool you're downloading before loading it within your runtime, as you would do when
+ installing a package using pip/npm/apt.
+
+
+
+ Args:
+ task_or_repo_id (`str`):
+ The task for which to load the tool or a repo ID of a tool on the Hub. Tasks implemented in Transformers
+ are:
+
+ - `"document-question-answering"`
+ - `"image-captioning"`
+ - `"image-question-answering"`
+ - `"image-segmentation"`
+ - `"speech-to-text"`
+ - `"summarization"`
+ - `"text-classification"`
+ - `"text-question-answering"`
+ - `"text-to-speech"`
+ - `"translation"`
+
+ model_repo_id (`str`, *optional*):
+ Use this argument to use a different model than the default one for the tool you selected.
+ remote (`bool`, *optional*, defaults to `False`):
+ Whether to use your tool by downloading the model or (if it is available) with an inference endpoint.
+ token (`str`, *optional*):
+ The token to identify you on hf.co. If unset, will use the token generated when running `huggingface-cli
+ login` (stored in `~/.huggingface`).
+ kwargs (additional keyword arguments, *optional*):
+ Additional keyword arguments that will be split in two: all arguments relevant to the Hub (such as
+ `cache_dir`, `revision`, `subfolder`) will be used when downloading the files for your tool, and the others
+ will be passed along to its init.
+ """
+ if task_or_repo_id in TASK_MAPPING:
+ tool_class_name = TASK_MAPPING[task_or_repo_id]
+ main_module = importlib.import_module("transformers")
+ tools_module = main_module.tools
+ tool_class = getattr(tools_module, tool_class_name)
+
+ if remote:
+ if model_repo_id is None:
+ endpoints = get_default_endpoints()
+ if task_or_repo_id not in endpoints:
+ raise ValueError(
+ f"Could not infer a default endpoint for {task_or_repo_id}, you need to pass one using the "
+ "`model_repo_id` argument."
+ )
+ model_repo_id = endpoints[task_or_repo_id]
+ return RemoteTool(model_repo_id, token=token, tool_class=tool_class)
+ else:
+ return tool_class(model_repo_id, token=token, **kwargs)
+ else:
+ logger.warning_once(
+            f"You're loading a tool from the Hub from {task_or_repo_id}. Please make sure this is a source that you "
+ f"trust as the code within that tool will be executed on your machine. Always verify the code of "
+ f"the tools that you load. We recommend specifying a `revision` to ensure you're loading the "
+ f"code that you have checked."
+ )
+ return Tool.from_hub(task_or_repo_id, model_repo_id=model_repo_id, token=token, remote=remote, **kwargs)
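+
+
+# Usage sketch (illustrative; the example sentence and labels are made up):
+#
+#   from transformers import load_tool
+#
+#   classifier = load_tool("text-classification")
+#   label = classifier("This movie was fantastic!", labels=["positive", "negative"])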
+
+
+def add_description(description):
+ """
+ A decorator that adds a description to a function.
+ """
+
+ def inner(func):
+ func.description = description
+ func.name = func.__name__
+ return func
+
+ return inner
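+
+# Example (sketch):
+#
+#   @add_description("Adds two numbers together.")
+#   def adder(a, b):
+#       return a + b
+#
+#   adder.description   # "Adds two numbers together."
+#   adder.name          # "adder"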
+
+
+## Will move to the Hub
+class EndpointClient:
+ def __init__(self, endpoint_url: str, token: Optional[str] = None):
+ self.headers = {**build_hf_headers(token=token), "Content-Type": "application/json"}
+ self.endpoint_url = endpoint_url
+
+ @staticmethod
+ def encode_image(image):
+ _bytes = io.BytesIO()
+ image.save(_bytes, format="PNG")
+ b64 = base64.b64encode(_bytes.getvalue())
+ return b64.decode("utf-8")
+
+ @staticmethod
+ def decode_image(raw_image):
+ if not is_vision_available():
+ raise ImportError(
+ "This tool returned an image but Pillow is not installed. Please install it (`pip install Pillow`)."
+ )
+
+ from PIL import Image
+
+ b64 = base64.b64decode(raw_image)
+ _bytes = io.BytesIO(b64)
+ return Image.open(_bytes)
+
+ def __call__(
+ self,
+ inputs: Optional[Union[str, Dict, List[str], List[List[str]]]] = None,
+ params: Optional[Dict] = None,
+ data: Optional[bytes] = None,
+ output_image: bool = False,
+ ) -> Any:
+ # Build payload
+ payload = {}
+ if inputs:
+ payload["inputs"] = inputs
+ if params:
+ payload["parameters"] = params
+
+ # Make API call
+ response = get_session().post(self.endpoint_url, headers=self.headers, json=payload, data=data)
+
+ # By default, parse the response for the user.
+ if output_image:
+ return self.decode_image(response.content)
+ else:
+ return response.json()
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/tools/document_question_answering.py b/llmeval-env/lib/python3.10/site-packages/transformers/tools/document_question_answering.py
new file mode 100644
index 0000000000000000000000000000000000000000..7b5e8782bd785f18001a4d7f3e3dac6a840506c5
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/tools/document_question_answering.py
@@ -0,0 +1,80 @@
+#!/usr/bin/env python
+# coding=utf-8
+
+# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import re
+
+from ..models.auto import AutoProcessor
+from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
+from ..utils import is_vision_available
+from .base import PipelineTool
+
+
+if is_vision_available():
+ from PIL import Image
+
+
+class DocumentQuestionAnsweringTool(PipelineTool):
+ default_checkpoint = "naver-clova-ix/donut-base-finetuned-docvqa"
+ description = (
+        "This is a tool that answers a question about a document (pdf). It takes an input named `document` which "
+ "should be the document containing the information, as well as a `question` that is the question about the "
+ "document. It returns a text that contains the answer to the question."
+ )
+ name = "document_qa"
+ pre_processor_class = AutoProcessor
+ model_class = VisionEncoderDecoderModel
+
+ inputs = ["image", "text"]
+ outputs = ["text"]
+
+ def __init__(self, *args, **kwargs):
+ if not is_vision_available():
+ raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.")
+
+ super().__init__(*args, **kwargs)
+
+ def encode(self, document: "Image", question: str):
+ task_prompt = "{user_input}"
+ prompt = task_prompt.replace("{user_input}", question)
+ decoder_input_ids = self.pre_processor.tokenizer(
+ prompt, add_special_tokens=False, return_tensors="pt"
+ ).input_ids
+ pixel_values = self.pre_processor(document, return_tensors="pt").pixel_values
+
+ return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}
+
+ def forward(self, inputs):
+ return self.model.generate(
+ inputs["pixel_values"].to(self.device),
+ decoder_input_ids=inputs["decoder_input_ids"].to(self.device),
+ max_length=self.model.decoder.config.max_position_embeddings,
+ early_stopping=True,
+ pad_token_id=self.pre_processor.tokenizer.pad_token_id,
+ eos_token_id=self.pre_processor.tokenizer.eos_token_id,
+ use_cache=True,
+ num_beams=1,
+ bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]],
+ return_dict_in_generate=True,
+ ).sequences
+
+ def decode(self, outputs):
+ sequence = self.pre_processor.batch_decode(outputs)[0]
+ sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, "")
+ sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, "")
+ sequence = re.sub(r"<.*?>", "", sequence, count=1).strip() # remove first task start token
+ sequence = self.pre_processor.token2json(sequence)
+
+ return sequence["answer"]
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/tools/evaluate_agent.py b/llmeval-env/lib/python3.10/site-packages/transformers/tools/evaluate_agent.py
new file mode 100644
index 0000000000000000000000000000000000000000..e9d14fab56cd6b2a10916d6daaf8ecfdd9900f6a
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/tools/evaluate_agent.py
@@ -0,0 +1,692 @@
+#!/usr/bin/env python
+# coding=utf-8
+
+# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from .agents import BASE_PYTHON_TOOLS, clean_code_for_chat
+from .python_interpreter import InterpretorError, evaluate
+
+
+### Fake tools for test
+def classifier(text, labels):
+ return f"This is the classification of {text} along {labels}."
+
+
+def translator(text, src_lang, tgt_lang):
+ return f"This is the translation of {text} from {src_lang} to {tgt_lang}."
+
+
+def speaker(text):
+ return f"This is actually a sound reading {text}."
+
+
+def transcriber(audio):
+ if "sound" not in audio:
+ raise ValueError(f"`audio` ({audio}) is not a sound.")
+ return f"This is the transcribed text from {audio}."
+
+
+def image_generator(prompt):
+ return f"This is actually an image representing {prompt}."
+
+
+def image_captioner(image):
+ if "image" not in image:
+ raise ValueError(f"`image` ({image}) is not an image.")
+ return f"This is a description of {image}."
+
+
+def image_transformer(image, prompt):
+ if "image" not in image:
+ raise ValueError(f"`image` ({image}) is not an image.")
+ return f"This is a transformation of {image} according to {prompt}."
+
+
+def question_answerer(text, question):
+ return f"This is the answer to {question} from {text}."
+
+
+def image_qa(image, question):
+ if "image" not in image:
+ raise ValueError(f"`image` ({image}) is not an image.")
+ return f"This is the answer to {question} from {image}."
+
+
+def text_downloader(url):
+ return f"This is the content of {url}."
+
+
+def summarizer(text):
+ return f"This is a summary of {text}."
+
+
+def video_generator(prompt, seconds=2):
+ return f"A video of {prompt}"
+
+
+def document_qa(image, question):
+ return f"This is the answer to {question} from the document {image}."
+
+
+def image_segmenter(image, prompt):
+ return f"This is the mask of {prompt} in {image}"
+
+
+TEST_TOOLS = {
+ "text_classifier": classifier,
+ "translator": translator,
+ "text_reader": speaker,
+ "summarizer": summarizer,
+ "transcriber": transcriber,
+ "image_generator": image_generator,
+ "image_captioner": image_captioner,
+ "image_transformer": image_transformer,
+ "text_qa": question_answerer,
+ "text_downloader": text_downloader,
+ "image_qa": image_qa,
+ "video_generator": video_generator,
+ "document_qa": document_qa,
+ "image_segmenter": image_segmenter,
+}
+
+
+class Problem:
+ """
+ A class that groups together all the information needed to solve a problem on which agents will be evaluated.
+
+ Args:
+ task (`str` or `list[str]`):
+ One or several descriptions of the task to perform. If a list, it should contain variations on the
+ phrasing, but for the same task.
+ inputs (`list[str]` or `dict[str, str]`):
+ The inputs that will be fed to the tools. For this testing environment, only strings are accepted as
+ values. Pass along a dictionary when you want to specify the value of each input, or just the list of
+ expected inputs (the value used will be `<<input_name>>` in this case).
+ answer (`str` or `list[str]`):
+ The theoretical answer (or list of possible valid answers) to the problem, as code.
+ """
+
+ def __init__(self, task, inputs, answer):
+ self.task = task
+ self.inputs = inputs
+ self.answer = answer
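+
+
+# Editor's illustration (not one of the evaluation tasks): a minimal single-step `Problem`.
+# With `inputs` given as a list, `evaluate_code` below substitutes the placeholder value
+# `<<text>>` for `text` before running the answer code against the fake tools above.
+#
+#     Problem(
+#         task=["Summarize the `text`."],
+#         inputs=["text"],
+#         answer="summarizer(text)",
+#     )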
+
+
+### The list of problems the agent will be evaluated on.
+EVALUATION_TASKS = [
+ Problem(
+ task=[
+ "Is the following `text` (in Spanish) positive or negative?",
+ "Is the text in the variable `text` (in Spanish) positive or negative?",
+ "Translate the following `text` from Spanish to English then tell me if its positive or negative.",
+ ],
+ inputs=["text"],
+ answer="""text_classifier(translator(text, src_lang="Spanish", tgt_lang="English"), labels=["positive", "negative"])""",
+ ),
+ Problem(
+ task=[
+ "Tell me out loud what the `image` contains.",
+ "Describe the following `image` out loud.",
+ "Find what is in the picture stored in `image` then read it out loud.",
+ ],
+ inputs=["image"],
+ answer=[
+ "text_reader(image_captioner(image))",
+ "text_reader(image_qa(image, question='What is in the image?'))",
+ ],
+ ),
+ Problem(
+ task=[
+ "Generate an image from the text given in `text_input`. Then transform it according to the text in `prompt`.",
+ "Use the following `text_input` to generate an image, then transform it by using the text in `prompt`.",
+ ],
+ inputs=["text_input", "prompt"],
+ answer="image_transformer(image_generator(text_input), prompt)",
+ ),
+ Problem(
+ task=[
+ "Download the content of `url`, summarize it then generate an image from its content.",
+ "Use a summary of the web page at `url` to generate an image.",
+ "Summarize the content of the web page at `url`, and use the result to generate an image.",
+ ],
+ inputs=["url"],
+ answer="image_generator(summarizer(text_downloader(url)))",
+ ),
+ Problem(
+ task=[
+ "Transform the following `image` using the prompt in `text`. The prompt is in Spanish.",
+ "Use the text prompt in `text` (in Spanish) to transform the following `image`.",
+ "Translate the `text` from Spanish to English then use it to transform the picture in `image`.",
+ ],
+ inputs=["text", "image"],
+ answer="image_transformer(image, translator(text, src_lang='Spanish', tgt_lang='English'))",
+ ),
+ Problem(
+ task=[
+ "Download the content of `url`, summarize it then read it out loud to me.",
+ "Read me a summary of the web page at `url`.",
+ ],
+ inputs=["url"],
+ answer="text_reader(summarizer(text_downloader(url)))",
+ ),
+ Problem(
+ task=[
+ "Generate an image from the text given in `text_input`.",
+ ],
+ inputs=["text_input"],
+ answer="image_generator(text_input)",
+ ),
+ Problem(
+ task=[
+ "Replace the beaver in the `image` by the `prompt`.",
+ "Transform the `image` so that it contains the `prompt`.",
+ "Use `prompt` to transform this `image`.",
+ ],
+ inputs=["image", "prompt"],
+ answer="image_transformer(image, prompt)",
+ ),
+ Problem(
+ task=[
+ "Provide me the summary of the `text`, then read it to me before transcribing it and translating it in French.",
+ "Summarize `text`, read it out loud then transcribe the audio and translate it in French.",
+ "Read me a summary of the `text` out loud. Transcribe this and translate it in French.",
+ ],
+ inputs=["text"],
+ answer="translator(transcriber(text_reader(summarizer(text))), src_lang='English', tgt_lang='French')",
+ ),
+ Problem(
+ task=["Generate a video of the `prompt`", "Animate a `prompt`", "Make me a short video using `prompt`."],
+ inputs={"prompt": "A lobster swimming"},
+ answer="video_generator('A lobster swimming')",
+ ),
+ Problem(
+ task=[
+ "Download the following file `url`, summarize it in a few words and generate a video from it."
+ "Fetch the file at this `url`, summarize it, and create an animation out of it."
+ ],
+ inputs=["url"],
+ answer="video_generator(summarizer(text_downloader(url)))",
+ ),
+]
+
+
+EVALUATION_CHATS = [
+ [
+ Problem(
+ task=[
+ "Translate the following `text` from Spanish to English.",
+ "Translate the following `text` from Spanish to English.",
+ ],
+ inputs=["text"],
+ answer="translated_text=translator(text, src_lang='Spanish', tgt_lang='English')",
+ ),
+ Problem(
+ task=[
+ "Is it positive or negative?",
+ "Tell me if its positive or negative.",
+ ],
+ inputs=[],
+ answer="text_classifier(translated_text, labels=['positive', 'negative'])",
+ ),
+ ],
+ [
+ Problem(
+ task=[
+ "What does this `image` contain?",
+ "Describe the following `image`.",
+ "Find what is in the picture stored in `image`",
+ ],
+ inputs=["image"],
+ answer=[
+ "description=image_captioner(image)",
+ "description=image_qa(image, question='What is in the image?')",
+ ],
+ ),
+ Problem(
+ task=["Now, read the description out loud.", "Great! Can you read it out loud?", "Read it out loud."],
+ inputs=[],
+ answer=["audio=text_reader(description)", "audio=text_reader(description)"],
+ ),
+ ],
+ [
+ Problem(
+ task=[
+ "Generate an image from the text given in `text_input`.",
+ "Use the following `text_input` to generate an image",
+ ],
+ inputs=["text_input"],
+ answer="image = image_generator(text_input)",
+ ),
+ Problem(
+ task=[
+ "Transform it according to the text in `prompt`.",
+ "Transform it by using the text in `prompt`.",
+ ],
+ inputs=["prompt"],
+ answer="image_transformer(image, prompt)",
+ ),
+ ],
+ [
+ Problem(
+ task=[
+ "Download the content of `url` and summarize it.",
+ "Summarize the content of the web page at `url`.",
+ ],
+ inputs=["url"],
+ answer="summary = summarizer(text_downloader(url))",
+ ),
+ Problem(
+ task=[
+ "Generate an image from its content.",
+ "Use the previous result to generate an image.",
+ ],
+ inputs=[],
+ answer="image_generator(summary)",
+ ),
+ ],
+ [
+ Problem(
+ task=[
+ "Translate this Spanish `text` in English.",
+ "Translate the `text` from Spanish to English.",
+ ],
+ inputs=["text"],
+ answer="translated_text = translator(text, src_lang='Spanish', tgt_lang='English')",
+ ),
+ Problem(
+ task=[
+ "Transform the following `image` using the translated `text`.",
+ "Use the previous result to transform the following `image`.",
+ ],
+ inputs=["image"],
+ answer="image_transformer(image, translated_text)",
+ ),
+ ],
+ [
+ Problem(
+ task=["Download the content of `url`.", "Get me the text on the weg page `url`."],
+ inputs=["url"],
+ answer="text = text_downloader(url)",
+ ),
+ Problem(
+ task=["Summarize this text.", "Summarize this text."],
+ inputs=[],
+ answer="summary = summarizer(text)",
+ ),
+ Problem(
+ task=["Read it out loud to me.", "Read me the previous result."],
+ inputs=[],
+ answer="text_reader(summary)",
+ ),
+ ],
+ [
+ Problem(
+ task=[
+ "Generate an image from the text given in `text_input`.",
+ ],
+ inputs=["text_input"],
+ answer="image_generator(text_input)",
+ ),
+ ],
+ [
+ Problem(
+ task=[
+ "Replace the beaver in the `image` by the `prompt`.",
+ "Transform the `image` so that it contains the `prompt`.",
+ "Use `prompt` to transform this `image`.",
+ ],
+ inputs=["image", "prompt"],
+ answer="image_transformer(image, prompt)",
+ ),
+ ],
+ [
+ Problem(
+ task=["Provide me the summary of the `text`.", "Summarize `text`."],
+ inputs=["text"],
+ answer="summary = summarizer(text)",
+ ),
+ Problem(
+ task=["Read this summary to me.", "Read it out loud."],
+ inputs=[],
+ answer="audio = text_reader(summarizer(text))",
+ ),
+ Problem(
+ task=["Transcribing the previous result back in text.", "Transcribe the audio."],
+ inputs=[],
+ answer="text = transcriber(audio)",
+ ),
+ Problem(
+ task=["Translating the last result in French.", "Translate this in French."],
+ inputs=[],
+ answer="translator(text, src_lang='English', tgt_lang='French')",
+ ),
+ ],
+ [
+ Problem(
+ task=["Generate a video of the `prompt`", "Animate a `prompt`", "Make me a short video using `prompt`."],
+ inputs={"prompt": "A lobster swimming"},
+ answer="video_generator('A lobster swimming')",
+ ),
+ ],
+ [
+ Problem(
+ task=[
+ "Download the content of `url` and summarize it.",
+ "Summarize the content of the web page at `url`.",
+ ],
+ inputs=["url"],
+ answer="summary = summarizer(text_downloader(url))",
+ ),
+ Problem(
+ task=["generate a video from it.", "Create an animation from the last result."],
+ inputs=[],
+ answer="video_generator(summary)",
+ ),
+ ],
+]
+
+
+def get_theoretical_tools(agent_answer, theoretical_answer, code_answer):
+ if not isinstance(theoretical_answer, list):
+ return {name for name in TEST_TOOLS if name in code_answer}
+
+ if isinstance(agent_answer, dict):
+ for one_answer, one_code in zip(theoretical_answer, code_answer):
+ if one_answer in agent_answer.values():
+ return {name for name in TEST_TOOLS if name in one_code}
+
+ for one_answer, one_code in zip(theoretical_answer, code_answer):
+ if agent_answer == one_answer:
+ return {name for name in TEST_TOOLS if name in one_code}
+
+ return {name for name in TEST_TOOLS if name in code_answer[0]}
+
+
+def evaluate_code(code, inputs=None, state=None, verbose=False, return_interpretor_error=False):
+ tools = BASE_PYTHON_TOOLS.copy()
+ for name, tool in TEST_TOOLS.items():
+ if name not in code:
+ continue
+ tools[name] = tool
+
+ if isinstance(inputs, dict):
+ inputs = inputs.copy()
+ elif inputs is not None:
+ inputs = {inp: f"<<{inp}>>" for inp in inputs}
+
+ if state is not None:
+ state.update(inputs)
+ else:
+ state = inputs
+
+ try:
+ return evaluate(code, tools, state)
+ except InterpretorError as e:
+ return str(e)
+ except Exception as e:
+ if verbose:
+ print(e)
+ return None
+
+
+def score_code(agent_answer, theoretical_answer, verbose: bool = False):
+ if verbose:
+ print(agent_answer, theoretical_answer)
+ theoretical_answer = theoretical_answer if isinstance(theoretical_answer, list) else [theoretical_answer]
+
+ if agent_answer in theoretical_answer:
+ if verbose:
+ print("Perfect!")
+ return 1
+ elif isinstance(agent_answer, dict) and any(v in theoretical_answer for v in agent_answer.values()):
+ if verbose:
+ print("Almsot perfect, result in state!")
+ return 0.75
+ else:
+ if verbose:
+ print("Result is not the right one but code executed.")
+ return 0.3
+
+
+def evaluate_one_result(explanation, code, agent_answer, theoretical_answer, answer, verbose=False):
+ tools_in_explanation = {name for name in TEST_TOOLS if f"`{name}`" in explanation}
+ theoretical_tools = get_theoretical_tools(agent_answer, theoretical_answer, answer)
+ if tools_in_explanation == theoretical_tools:
+ tool_selection_score = 1.0
+ tool_selection_errors = None
+ else:
+ missing_tools = len(theoretical_tools - tools_in_explanation)
+ unexpected_tools = len(tools_in_explanation - theoretical_tools)
+ tool_selection_score = max(0, 1.0 - 0.25 * missing_tools - 0.25 * unexpected_tools)
+
+ tool_selection_errors = {
+ "selected_tools": tools_in_explanation,
+ "theoretical_tools": theoretical_tools,
+ }
+
+ tools_in_code = {name for name in TEST_TOOLS if name in code}
+ if tools_in_code == theoretical_tools:
+ tool_used_score = 1.0
+ tool_used_errors = None
+ else:
+ missing_tools = len(theoretical_tools - tools_in_code)
+ unexpected_tools = len(tools_in_code - theoretical_tools)
+ tool_used_score = max(0, 1.0 - 0.25 * missing_tools - 0.25 * unexpected_tools)
+
+ tool_used_errors = {
+ "selected_tools": tools_in_explanation,
+ "theoretical_tools": theoretical_tools,
+ }
+
+ score = score_code(agent_answer, theoretical_answer, verbose=verbose)
+ if score < 1.0:
+ code_errors = {
+ "code_produced": code,
+ "evaluation": agent_answer,
+ "theoretical_answer": theoretical_answer,
+ }
+ else:
+ code_errors = None
+
+ return (tool_selection_score, tool_used_score, score), (tool_selection_errors, tool_used_errors, code_errors)
+
+
+def evaluate_agent(agent, batch_size=8, verbose=False, return_errors=False):
+ """
+ Evaluates a new agent on all `EVALUATION_TASKS`.
+
+ Example:
+
+ ```py
+ agent = OpenAiAgent(model="text-davinci-003", api_key=your_api_key)
+ scores = evaluate_agent(agent)
+ print(scores)
+ ```
+ """
+ # Sanity check
+ agent_tools = set(agent.toolbox.keys())
+ if agent_tools != set(TEST_TOOLS):
+ missing_tools = set(TEST_TOOLS) - agent_tools
+ unexpected_tools = agent_tools - set(TEST_TOOLS)
+ raise ValueError(
+ f"Fix the test tools in the evaluate_agent module. Tools mising: {missing_tools}. Extra tools: {unexpected_tools}."
+ )
+
+ eval_tasks = []
+ eval_idx = []
+ for idx, pb in enumerate(EVALUATION_TASKS):
+ if isinstance(pb.task, list):
+ eval_tasks.extend(pb.task)
+ eval_idx.extend([idx] * len(pb.task))
+ else:
+ eval_tasks.append(pb.task)
+ eval_idx.append(idx)
+
+ tool_selection_score = 0
+ tool_used_score = 0
+ code_score = 0
+
+ if return_errors:
+ tool_selection_errors = {}
+ tool_used_errors = {}
+ code_errors = {}
+
+ for start_idx in range(0, len(eval_tasks), batch_size):
+ end_idx = min(start_idx + batch_size, len(eval_tasks))
+ batch_tasks = eval_tasks[start_idx:end_idx]
+
+ prompts = [agent.format_prompt(task) for task in batch_tasks]
+ results = agent.generate_many(prompts, stop=["Task:"])
+
+ for idx, result in enumerate(results):
+ problem = EVALUATION_TASKS[eval_idx[start_idx + idx]]
+ if verbose:
+ print(f"====Task {start_idx + idx}====\n{batch_tasks[idx]}\n")
+ explanation, code = agent.clean_code_for_run(result)
+
+ # Evaluate agent answer and code answer
+ agent_answer = evaluate_code(code, problem.inputs, verbose=verbose)
+ if isinstance(problem.answer, list):
+ theoretical_answer = [evaluate_code(answer, problem.inputs) for answer in problem.answer]
+ else:
+ theoretical_answer = evaluate_code(problem.answer, problem.inputs)
+
+ scores, errors = evaluate_one_result(
+ explanation, code, agent_answer, theoretical_answer, problem.answer, verbose=verbose
+ )
+
+ tool_selection_score += scores[0]
+ tool_used_score += scores[1]
+ code_score += scores[2]
+
+ if return_errors:
+ if errors[0] is not None:
+ tool_selection_errors[batch_tasks[idx]] = errors[0]
+ if errors[1] is not None:
+ tool_used_errors[batch_tasks[idx]] = errors[1]
+ if errors[2] is not None:
+ code_errors[batch_tasks[idx]] = errors[2]
+
+ scores = {
+ "tool selection score": 100 * (tool_selection_score / len(eval_tasks)),
+ "tool used score": 100 * (tool_used_score / len(eval_tasks)),
+ "code score": 100 * (code_score / len(eval_tasks)),
+ }
+
+ if return_errors:
+ return scores, tool_selection_errors, tool_used_errors, code_errors
+ else:
+ return scores
+
+
+def evaluate_chat_agent(agent, verbose=False, return_errors=False):
+ """
+ Evaluates a new agent on all `EVALUATION_CHATS`.
+
+ Example:
+
+ ```py
+ agent = OpenAiAgent(model="text-davinci-003", api_key=your_api_key)
+ scores = evaluate_chat_agent(agent)
+ print(scores)
+ ```
+ """
+ # Sanity check
+ agent_tools = set(agent.toolbox.keys())
+ if agent_tools != set(TEST_TOOLS):
+ missing_tools = set(TEST_TOOLS) - agent_tools
+ unexpected_tools = agent_tools - set(TEST_TOOLS)
+ raise ValueError(
+ f"Fix the test tools in the evaluate_agent module. Tools mising: {missing_tools}. Extra tools: {unexpected_tools}."
+ )
+
+ tool_selection_score = 0
+ tool_used_score = 0
+ code_score = 0
+ total_steps = 0
+
+ if return_errors:
+ tool_selection_errors = {}
+ tool_used_errors = {}
+ code_errors = {}
+
+ for chat_problem in EVALUATION_CHATS:
+ if isinstance(chat_problem[0].task, str):
+ resolved_problems = [chat_problem]
+ else:
+ resolved_problems = [
+ [Problem(task=pb.task[i], inputs=pb.inputs, answer=pb.answer) for pb in chat_problem]
+ for i in range(len(chat_problem[0].task))
+ ]
+ for problem in resolved_problems:
+ agent.prepare_for_new_chat()
+ agent_state = {}
+ theoretical_state = (
+ [{} for _ in range(len(problem[0].answer))] if isinstance(problem[0].answer, list) else {}
+ )
+
+ for step, step_problem in enumerate(problem):
+ if verbose:
+ print(step_problem.task)
+ total_steps += 1
+ prompt = agent.format_prompt(step_problem.task, chat_mode=True)
+ result = agent.generate_one(prompt, stop=["Human:", "====="])
+ agent.chat_history = prompt + result + "\n"
+
+ explanation, code = clean_code_for_chat(result)
+
+ if verbose:
+ print(f"==Explanation from the agent==\n{explanation}")
+ print(f"\n==Code generated by the agent==\n{code}")
+
+ # Evaluate agent answer and code answer
+ agent_answer = evaluate_code(code, step_problem.inputs, state=agent_state, verbose=verbose)
+
+ answer = step_problem.answer
+ if isinstance(answer, list):
+ theoretical_answer = [
+ evaluate_code(a, step_problem.inputs, state=state)
+ for a, state in zip(answer, theoretical_state)
+ ]
+ else:
+ theoretical_answer = evaluate_code(answer, step_problem.inputs, state=theoretical_state)
+
+ scores, errors = evaluate_one_result(
+ explanation, code, agent_answer, theoretical_answer, answer, verbose=verbose
+ )
+
+ tool_selection_score += scores[0]
+ tool_used_score += scores[1]
+ code_score += scores[2]
+
+ if return_errors:
+ if errors[0] is not None:
+ tool_selection_errors[step_problem.task] = errors[0]
+ if errors[1] is not None:
+ tool_used_errors[step_problem.task] = errors[1]
+ if errors[2] is not None:
+ code_errors[step_problem.task] = errors[2]
+
+ scores = {
+ "tool selection score": 100 * (tool_selection_score / total_steps),
+ "tool used score": 100 * (tool_used_score / total_steps),
+ "code score": 100 * (code_score / total_steps),
+ }
+
+ if return_errors:
+ return scores, tool_selection_errors, tool_used_errors, code_errors
+ else:
+ return scores
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/tools/image_captioning.py b/llmeval-env/lib/python3.10/site-packages/transformers/tools/image_captioning.py
new file mode 100644
index 0000000000000000000000000000000000000000..cfcf0bc8dc2834bf10ba7c03929743692756837a
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/tools/image_captioning.py
@@ -0,0 +1,51 @@
+#!/usr/bin/env python
+# coding=utf-8
+
+# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+from ..models.auto import AutoModelForVision2Seq
+from ..utils import requires_backends
+from .base import PipelineTool
+
+
+if TYPE_CHECKING:
+ from PIL import Image
+
+
+class ImageCaptioningTool(PipelineTool):
+ default_checkpoint = "Salesforce/blip-image-captioning-base"
+ description = (
+ "This is a tool that generates a description of an image. It takes an input named `image` which should be the "
+ "image to caption, and returns a text that contains the description in English."
+ )
+ name = "image_captioner"
+ model_class = AutoModelForVision2Seq
+
+ inputs = ["image"]
+ outputs = ["text"]
+
+ def __init__(self, *args, **kwargs):
+ requires_backends(self, ["vision"])
+ super().__init__(*args, **kwargs)
+
+ def encode(self, image: "Image"):
+ return self.pre_processor(images=image, return_tensors="pt")
+
+ def forward(self, inputs):
+ return self.model.generate(**inputs)
+
+ def decode(self, outputs):
+ return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
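+
+
+if __name__ == "__main__":
+    # Editor's illustrative sketch, not part of the upstream module (run with
+    # `python -m transformers.tools.image_captioning`). Assumes Pillow is installed and
+    # "photo.jpg" exists (hypothetical file name); the BLIP checkpoint is downloaded on
+    # first use.
+    from PIL import Image
+
+    captioner = ImageCaptioningTool()
+    print(captioner(image=Image.open("photo.jpg")))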
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/tools/image_question_answering.py b/llmeval-env/lib/python3.10/site-packages/transformers/tools/image_question_answering.py
new file mode 100644
index 0000000000000000000000000000000000000000..a9d9ef82b514778a363c9cefea301122860382f2
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/tools/image_question_answering.py
@@ -0,0 +1,57 @@
+#!/usr/bin/env python
+# coding=utf-8
+
+# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+import torch
+
+from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
+from ..utils import requires_backends
+from .base import PipelineTool
+
+
+if TYPE_CHECKING:
+ from PIL import Image
+
+
+class ImageQuestionAnsweringTool(PipelineTool):
+ default_checkpoint = "dandelin/vilt-b32-finetuned-vqa"
+ description = (
+ "This is a tool that answers a question about an image. It takes an input named `image` which should be the "
+ "image containing the information, as well as a `question` which should be the question in English. It "
+ "returns a text that is the answer to the question."
+ )
+ name = "image_qa"
+ pre_processor_class = AutoProcessor
+ model_class = AutoModelForVisualQuestionAnswering
+
+ inputs = ["image", "text"]
+ outputs = ["text"]
+
+ def __init__(self, *args, **kwargs):
+ requires_backends(self, ["vision"])
+ super().__init__(*args, **kwargs)
+
+ def encode(self, image: "Image", question: str):
+ return self.pre_processor(image, question, return_tensors="pt")
+
+ def forward(self, inputs):
+ with torch.no_grad():
+ return self.model(**inputs).logits
+
+ def decode(self, outputs):
+ idx = outputs.argmax(-1).item()
+ return self.model.config.id2label[idx]
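+
+
+if __name__ == "__main__":
+    # Editor's illustrative sketch, not part of the upstream module (run with
+    # `python -m transformers.tools.image_question_answering`). Assumes Pillow is installed
+    # and "street.jpg" exists (hypothetical file name); the ViLT checkpoint is downloaded
+    # on first use.
+    from PIL import Image
+
+    vqa = ImageQuestionAnsweringTool()
+    print(vqa(image=Image.open("street.jpg"), question="How many cars are there?"))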
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/tools/image_segmentation.py b/llmeval-env/lib/python3.10/site-packages/transformers/tools/image_segmentation.py
new file mode 100644
index 0000000000000000000000000000000000000000..ce2615d8bfd8590fc62ba3e31db582cc43d8eec6
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/tools/image_segmentation.py
@@ -0,0 +1,58 @@
+#!/usr/bin/env python
+# coding=utf-8
+
+# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import numpy as np
+import torch
+
+from ..models.clipseg import CLIPSegForImageSegmentation
+from ..utils import is_vision_available, requires_backends
+from .base import PipelineTool
+
+
+if is_vision_available():
+ from PIL import Image
+
+
+class ImageSegmentationTool(PipelineTool):
+ description = (
+ "This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image. "
+ "It takes two arguments named `image` which should be the original image, and `label` which should be a text "
+ "describing the elements what should be identified in the segmentation mask. The tool returns the mask."
+ )
+ default_checkpoint = "CIDAS/clipseg-rd64-refined"
+ name = "image_segmenter"
+ model_class = CLIPSegForImageSegmentation
+
+ inputs = ["image", "text"]
+ outputs = ["image"]
+
+ def __init__(self, *args, **kwargs):
+ requires_backends(self, ["vision"])
+ super().__init__(*args, **kwargs)
+
+ def encode(self, image: "Image", label: str):
+ return self.pre_processor(text=[label], images=[image], padding=True, return_tensors="pt")
+
+ def forward(self, inputs):
+ with torch.no_grad():
+ logits = self.model(**inputs).logits
+ return logits
+
+ def decode(self, outputs):
+ array = outputs.cpu().detach().numpy()
+ array[array <= 0] = 0
+ array[array > 0] = 1
+ return Image.fromarray((array * 255).astype(np.uint8))
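+
+
+if __name__ == "__main__":
+    # Editor's illustrative sketch, not part of the upstream module (run with
+    # `python -m transformers.tools.image_segmentation`). Assumes Pillow is installed and
+    # "street.jpg" exists (hypothetical file name). The result is the mask built in
+    # `decode` above, possibly wrapped in an agent output type by the base class.
+    from PIL import Image
+
+    segmenter = ImageSegmentationTool()
+    mask = segmenter(image=Image.open("street.jpg"), label="a car")
+    print(mask)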
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/tools/prompts.py b/llmeval-env/lib/python3.10/site-packages/transformers/tools/prompts.py
new file mode 100644
index 0000000000000000000000000000000000000000..2dbb799f859ffe50ff9ca509308a1823f407203f
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/tools/prompts.py
@@ -0,0 +1,48 @@
+#!/usr/bin/env python
+# coding=utf-8
+
+# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import re
+
+from ..utils import cached_file
+
+
+# docstyle-ignore
+CHAT_MESSAGE_PROMPT = """
+Human: <<task>>
+
+Assistant: """
+
+
+DEFAULT_PROMPTS_REPO = "huggingface-tools/default-prompts"
+PROMPT_FILES = {"chat": "chat_prompt_template.txt", "run": "run_prompt_template.txt"}
+
+
+def download_prompt(prompt_or_repo_id, agent_name, mode="run"):
+ """
+ Downloads and caches the prompt from a repo and returns its contents (if necessary).
+ """
+ if prompt_or_repo_id is None:
+ prompt_or_repo_id = DEFAULT_PROMPTS_REPO
+
+ # prompt is considered a repo ID when it does not contain any kind of space
+ if re.search("\\s", prompt_or_repo_id) is not None:
+ return prompt_or_repo_id
+
+ prompt_file = cached_file(
+ prompt_or_repo_id, PROMPT_FILES[mode], repo_type="dataset", user_agent={"agent": agent_name}
+ )
+ with open(prompt_file, "r", encoding="utf-8") as f:
+ return f.read()
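+
+
+if __name__ == "__main__":
+    # Editor's illustrative sketch, not part of the upstream module (run with
+    # `python -m transformers.tools.prompts`): fetch the default "run" template from the
+    # Hub, then show that a literal prompt (anything containing whitespace) is returned
+    # unchanged. "example-agent" is a made-up agent name used for the user-agent metadata.
+    template = download_prompt(None, agent_name="example-agent", mode="run")
+    print(template[:200])
+    print(download_prompt("Answer the following task: <<task>>", agent_name="example-agent"))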
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/tools/python_interpreter.py b/llmeval-env/lib/python3.10/site-packages/transformers/tools/python_interpreter.py
new file mode 100644
index 0000000000000000000000000000000000000000..960be1a2a2654918c0cc9820745cefde20e74e9a
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/tools/python_interpreter.py
@@ -0,0 +1,253 @@
+#!/usr/bin/env python
+# coding=utf-8
+
+# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import ast
+import difflib
+from collections.abc import Mapping
+from typing import Any, Callable, Dict
+
+
+class InterpretorError(ValueError):
+ """
+ An error raised when the interpreter cannot evaluate a Python expression, due to a syntax error or unsupported
+ operations.
+ """
+
+ pass
+
+
+def evaluate(code: str, tools: Dict[str, Callable], state=None, chat_mode=False):
+ """
+ Evaluate a Python expression using the content of the variables stored in a state and only evaluating a given
+ of functions.
+
+ This function will recurse through the nodes of the tree provided.
+
+ Args:
+ code (`str`):
+ The code to evaluate.
+ tools (`Dict[str, Callable]`):
+ The functions that may be called during the evaluation. Any call to another function will fail with an
+ `InterpretorError`.
+ state (`Dict[str, Any]`):
+ A dictionary mapping variable names to values. The `state` should contain the initial inputs but will be
+ updated by this function to contain all variables as they are evaluated.
+ chat_mode (`bool`, *optional*, defaults to `False`):
+ Whether or not the function is called from `Agent.chat`.
+ """
+ try:
+ expression = ast.parse(code)
+ except SyntaxError as e:
+ print("The code generated by the agent is not valid.\n", e)
+ return
+ if state is None:
+ state = {}
+ result = None
+ for idx, node in enumerate(expression.body):
+ try:
+ line_result = evaluate_ast(node, state, tools)
+ except InterpretorError as e:
+ msg = f"Evaluation of the code stopped at line {idx} before the end because of the following error"
+ if chat_mode:
+ msg += (
+ f". Copy paste the following error message and send it back to the agent:\nI get an error: '{e}'"
+ )
+ else:
+ msg += f":\n{e}"
+ print(msg)
+ break
+ if line_result is not None:
+ result = line_result
+
+ return result
+
+
+def evaluate_ast(expression: ast.AST, state: Dict[str, Any], tools: Dict[str, Callable]):
+ """
+ Evaluate an abstract syntax tree using the content of the variables stored in a state and only evaluating a given
+ set of functions.
+
+ This function will recurse through the nodes of the tree provided.
+
+ Args:
+ expression (`ast.AST`):
+ The code to evaluate, as an abstract syntax tree.
+ state (`Dict[str, Any]`):
+ A dictionary mapping variable names to values. The `state` is updated if need be when the evaluation
+ encounters assignments.
+ tools (`Dict[str, Callable]`):
+ The functions that may be called during the evaluation. Any call to another function will fail with an
+ `InterpretorError`.
+ """
+ if isinstance(expression, ast.Assign):
+ # Assignment -> we evaluate the assignment, which should update the state.
+ # We return the variable assigned as it may be used to determine the final result.
+ return evaluate_assign(expression, state, tools)
+ elif isinstance(expression, ast.Call):
+ # Function call -> we return the value of the function call
+ return evaluate_call(expression, state, tools)
+ elif isinstance(expression, ast.Constant):
+ # Constant -> just return the value
+ return expression.value
+ elif isinstance(expression, ast.Dict):
+ # Dict -> evaluate all keys and values
+ keys = [evaluate_ast(k, state, tools) for k in expression.keys]
+ values = [evaluate_ast(v, state, tools) for v in expression.values]
+ return dict(zip(keys, values))
+ elif isinstance(expression, ast.Expr):
+ # Expression -> evaluate the content
+ return evaluate_ast(expression.value, state, tools)
+ elif isinstance(expression, ast.For):
+ # For loop -> execute the loop
+ return evaluate_for(expression, state, tools)
+ elif isinstance(expression, ast.FormattedValue):
+ # Formatted value (part of f-string) -> evaluate the content and return
+ return evaluate_ast(expression.value, state, tools)
+ elif isinstance(expression, ast.If):
+ # If -> execute the right branch
+ return evaluate_if(expression, state, tools)
+ elif hasattr(ast, "Index") and isinstance(expression, ast.Index):
+ return evaluate_ast(expression.value, state, tools)
+ elif isinstance(expression, ast.JoinedStr):
+ return "".join([str(evaluate_ast(v, state, tools)) for v in expression.values])
+ elif isinstance(expression, ast.List):
+ # List -> evaluate all elements
+ return [evaluate_ast(elt, state, tools) for elt in expression.elts]
+ elif isinstance(expression, ast.Name):
+ # Name -> pick up the value in the state
+ return evaluate_name(expression, state, tools)
+ elif isinstance(expression, ast.Subscript):
+ # Subscript -> return the value of the indexing
+ return evaluate_subscript(expression, state, tools)
+ else:
+ # For now we refuse anything else. Let's add things as we need them.
+ raise InterpretorError(f"{expression.__class__.__name__} is not supported.")
+
+
+def evaluate_assign(assign, state, tools):
+ var_names = assign.targets
+ result = evaluate_ast(assign.value, state, tools)
+
+ if len(var_names) == 1:
+ state[var_names[0].id] = result
+ else:
+ if len(result) != len(var_names):
+ raise InterpretorError(f"Expected {len(var_names)} values but got {len(result)}.")
+ for var_name, r in zip(var_names, result):
+ state[var_name.id] = r
+ return result
+
+
+def evaluate_call(call, state, tools):
+ if not isinstance(call.func, ast.Name):
+ raise InterpretorError(
+ f"It is not permitted to evaluate other functions than the provided tools (tried to execute {call.func} of "
+ f"type {type(call.func)}."
+ )
+ func_name = call.func.id
+ if func_name not in tools:
+ raise InterpretorError(
+ f"It is not permitted to evaluate other functions than the provided tools (tried to execute {call.func.id})."
+ )
+
+ func = tools[func_name]
+ # Todo deal with args
+ args = [evaluate_ast(arg, state, tools) for arg in call.args]
+ kwargs = {keyword.arg: evaluate_ast(keyword.value, state, tools) for keyword in call.keywords}
+ return func(*args, **kwargs)
+
+
+def evaluate_subscript(subscript, state, tools):
+ index = evaluate_ast(subscript.slice, state, tools)
+ value = evaluate_ast(subscript.value, state, tools)
+ if isinstance(value, (list, tuple)):
+ return value[int(index)]
+ if index in value:
+ return value[index]
+ if isinstance(index, str) and isinstance(value, Mapping):
+ close_matches = difflib.get_close_matches(index, list(value.keys()))
+ if len(close_matches) > 0:
+ return value[close_matches[0]]
+
+ raise InterpretorError(f"Could not index {value} with '{index}'.")
+
+
+def evaluate_name(name, state, tools):
+ if name.id in state:
+ return state[name.id]
+ close_matches = difflib.get_close_matches(name.id, list(state.keys()))
+ if len(close_matches) > 0:
+ return state[close_matches[0]]
+ raise InterpretorError(f"The variable `{name.id}` is not defined.")
+
+
+def evaluate_condition(condition, state, tools):
+ if len(condition.ops) > 1:
+ raise InterpretorError("Cannot evaluate conditions with multiple operators")
+
+ left = evaluate_ast(condition.left, state, tools)
+ comparator = condition.ops[0]
+ right = evaluate_ast(condition.comparators[0], state, tools)
+
+ if isinstance(comparator, ast.Eq):
+ return left == right
+ elif isinstance(comparator, ast.NotEq):
+ return left != right
+ elif isinstance(comparator, ast.Lt):
+ return left < right
+ elif isinstance(comparator, ast.LtE):
+ return left <= right
+ elif isinstance(comparator, ast.Gt):
+ return left > right
+ elif isinstance(comparator, ast.GtE):
+ return left >= right
+ elif isinstance(comparator, ast.Is):
+ return left is right
+ elif isinstance(comparator, ast.IsNot):
+ return left is not right
+ elif isinstance(comparator, ast.In):
+ return left in right
+ elif isinstance(comparator, ast.NotIn):
+ return left not in right
+ else:
+ raise InterpretorError(f"Operator not supported: {comparator}")
+
+
+def evaluate_if(if_statement, state, tools):
+ result = None
+ if evaluate_condition(if_statement.test, state, tools):
+ for line in if_statement.body:
+ line_result = evaluate_ast(line, state, tools)
+ if line_result is not None:
+ result = line_result
+ else:
+ for line in if_statement.orelse:
+ line_result = evaluate_ast(line, state, tools)
+ if line_result is not None:
+ result = line_result
+ return result
+
+
+def evaluate_for(for_loop, state, tools):
+ result = None
+ iterator = evaluate_ast(for_loop.iter, state, tools)
+ for counter in iterator:
+ state[for_loop.target.id] = counter
+ for expression in for_loop.body:
+ line_result = evaluate_ast(expression, state, tools)
+ if line_result is not None:
+ result = line_result
+ return result
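+
+
+if __name__ == "__main__":
+    # Editor's illustrative sketch, not part of the upstream module (run with
+    # `python -m transformers.tools.python_interpreter`): evaluate a tiny program with the
+    # restricted interpreter. Only callables listed in `tools` may be called, and `state`
+    # is updated in place with every assignment. `add` is a made-up tool for the demo.
+    program = "x = 2\ny = add(x, 3)\nf'{x} plus 3 is {y}'"
+    state = {}
+    print(evaluate(program, tools={"add": lambda a, b: a + b}, state=state))  # 2 plus 3 is 5
+    print(state)  # {'x': 2, 'y': 5}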
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/tools/speech_to_text.py b/llmeval-env/lib/python3.10/site-packages/transformers/tools/speech_to_text.py
new file mode 100644
index 0000000000000000000000000000000000000000..d3b8fd29ee1ad0809cf8b003df50a470e609400f
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/tools/speech_to_text.py
@@ -0,0 +1,41 @@
+#!/usr/bin/env python
+# coding=utf-8
+
+# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
+from .base import PipelineTool
+
+
+class SpeechToTextTool(PipelineTool):
+ default_checkpoint = "openai/whisper-base"
+ description = (
+ "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
+ "transcribed text."
+ )
+ name = "transcriber"
+ pre_processor_class = WhisperProcessor
+ model_class = WhisperForConditionalGeneration
+
+ inputs = ["audio"]
+ outputs = ["text"]
+
+ def encode(self, audio):
+ return self.pre_processor(audio, return_tensors="pt").input_features
+
+ def forward(self, inputs):
+ return self.model.generate(inputs=inputs)
+
+ def decode(self, outputs):
+ return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0]
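+
+
+if __name__ == "__main__":
+    # Editor's illustrative sketch, not part of the upstream module (run with
+    # `python -m transformers.tools.speech_to_text`): transcribe one second of silence
+    # sampled at 16 kHz, the rate Whisper expects. A real call would pass a waveform
+    # loaded from an audio file instead.
+    import numpy as np
+
+    transcriber = SpeechToTextTool()
+    print(transcriber(audio=np.zeros(16000, dtype=np.float32)))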
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/tools/text_classification.py b/llmeval-env/lib/python3.10/site-packages/transformers/tools/text_classification.py
new file mode 100644
index 0000000000000000000000000000000000000000..f04cdc05b6ac67cd285a1011d83a7bb2854adfe1
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/tools/text_classification.py
@@ -0,0 +1,70 @@
+#!/usr/bin/env python
+# coding=utf-8
+
+# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import torch
+
+from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
+from .base import PipelineTool
+
+
+class TextClassificationTool(PipelineTool):
+ """
+ Example:
+
+ ```py
+ from transformers.tools import TextClassificationTool
+
+ classifier = TextClassificationTool()
+ classifier("This is a super nice API!", labels=["positive", "negative"])
+ ```
+ """
+
+ default_checkpoint = "facebook/bart-large-mnli"
+ description = (
+ "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
+ "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
+ "It returns the most likely label in the list of provided `labels` for the input text."
+ )
+ name = "text_classifier"
+ pre_processor_class = AutoTokenizer
+ model_class = AutoModelForSequenceClassification
+
+ inputs = ["text", ["text"]]
+ outputs = ["text"]
+
+ def setup(self):
+ super().setup()
+ config = self.model.config
+ self.entailment_id = -1
+ for idx, label in config.id2label.items():
+ if label.lower().startswith("entail"):
+ self.entailment_id = int(idx)
+ if self.entailment_id == -1:
+ raise ValueError("Could not determine the entailment ID from the model config, please pass it at init.")
+
+ def encode(self, text, labels):
+ self._labels = labels
+ return self.pre_processor(
+ [text] * len(labels),
+ [f"This example is {label}" for label in labels],
+ return_tensors="pt",
+ padding="max_length",
+ )
+
+ def decode(self, outputs):
+ # Each row holds the NLI logits for one (text, candidate label) pair; keep the
+ # entailment logit (index found in `setup`) and return the most entailed label.
+ logits = outputs.logits
+ label_id = torch.argmax(logits[:, self.entailment_id]).item()
+ return self._labels[label_id]
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/tools/text_question_answering.py b/llmeval-env/lib/python3.10/site-packages/transformers/tools/text_question_answering.py
new file mode 100644
index 0000000000000000000000000000000000000000..2a7c2fc09a63499871bc729825b812c79348c762
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/tools/text_question_answering.py
@@ -0,0 +1,52 @@
+#!/usr/bin/env python
+# coding=utf-8
+
+# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
+from .base import PipelineTool
+
+
+QA_PROMPT = """Here is a text containing a lot of information: '''{text}'''.
+
+Can you answer this question about the text: '{question}'"""
+
+
+class TextQuestionAnsweringTool(PipelineTool):
+ default_checkpoint = "google/flan-t5-base"
+ description = (
+ "This is a tool that answers questions related to a text. It takes two arguments named `text`, which is the "
+ "text where to find the answer, and `question`, which is the question, and returns the answer to the question."
+ )
+ name = "text_qa"
+ pre_processor_class = AutoTokenizer
+ model_class = AutoModelForSeq2SeqLM
+
+ inputs = ["text", "text"]
+ outputs = ["text"]
+
+ def encode(self, text: str, question: str):
+ prompt = QA_PROMPT.format(text=text, question=question)
+ return self.pre_processor(prompt, return_tensors="pt")
+
+ def forward(self, inputs):
+ output_ids = self.model.generate(**inputs)
+
+ in_b, _ = inputs["input_ids"].shape
+ out_b = output_ids.shape[0]
+
+ return output_ids.reshape(in_b, out_b // in_b, *output_ids.shape[1:])[0][0]
+
+ def decode(self, outputs):
+ return self.pre_processor.decode(outputs, skip_special_tokens=True, clean_up_tokenization_spaces=True)
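+
+
+if __name__ == "__main__":
+    # Editor's illustrative sketch, not part of the upstream module (run with
+    # `python -m transformers.tools.text_question_answering`): answer a question about a
+    # short passage with the default Flan-T5 checkpoint.
+    passage = "The Eiffel Tower was completed in 1889 and is located in Paris."
+    qa = TextQuestionAnsweringTool()
+    print(qa(text=passage, question="When was the Eiffel Tower completed?"))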
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/tools/text_summarization.py b/llmeval-env/lib/python3.10/site-packages/transformers/tools/text_summarization.py
new file mode 100644
index 0000000000000000000000000000000000000000..8eedf234ae50b51e23e829cae2b8de4f3ad287e5
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/tools/text_summarization.py
@@ -0,0 +1,52 @@
+#!/usr/bin/env python
+# coding=utf-8
+
+# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
+from .base import PipelineTool
+
+
+class TextSummarizationTool(PipelineTool):
+ """
+ Example:
+
+ ```py
+ from transformers.tools import TextSummarizationTool
+
+ summarizer = TextSummarizationTool()
+ summarizer(long_text)
+ ```
+ """
+
+ default_checkpoint = "philschmid/bart-large-cnn-samsum"
+ description = (
+ "This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, "
+ "and returns a summary of the text."
+ )
+ name = "summarizer"
+ pre_processor_class = AutoTokenizer
+ model_class = AutoModelForSeq2SeqLM
+
+ inputs = ["text"]
+ outputs = ["text"]
+
+ def encode(self, text):
+ return self.pre_processor(text, return_tensors="pt", truncation=True)
+
+ def forward(self, inputs):
+ return self.model.generate(**inputs)[0]
+
+ def decode(self, outputs):
+ return self.pre_processor.decode(outputs, skip_special_tokens=True, clean_up_tokenization_spaces=True)
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/tools/text_to_speech.py b/llmeval-env/lib/python3.10/site-packages/transformers/tools/text_to_speech.py
new file mode 100644
index 0000000000000000000000000000000000000000..9faed77b01a35c3bd9c9530cd421f02e348a13af
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/tools/text_to_speech.py
@@ -0,0 +1,65 @@
+#!/usr/bin/env python
+# coding=utf-8
+
+# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import torch
+
+from ..models.speecht5 import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor
+from ..utils import is_datasets_available
+from .base import PipelineTool
+
+
+if is_datasets_available():
+ from datasets import load_dataset
+
+
+class TextToSpeechTool(PipelineTool):
+ default_checkpoint = "microsoft/speecht5_tts"
+ description = (
+ "This is a tool that reads an English text out loud. It takes an input named `text` which should contain the "
+ "text to read (in English) and returns a waveform object containing the sound."
+ )
+ name = "text_reader"
+ pre_processor_class = SpeechT5Processor
+ model_class = SpeechT5ForTextToSpeech
+ post_processor_class = SpeechT5HifiGan
+
+ inputs = ["text"]
+ outputs = ["audio"]
+
+ def setup(self):
+ if self.post_processor is None:
+ self.post_processor = "microsoft/speecht5_hifigan"
+ super().setup()
+
+ def encode(self, text, speaker_embeddings=None):
+ inputs = self.pre_processor(text=text, return_tensors="pt", truncation=True)
+
+ if speaker_embeddings is None:
+ if not is_datasets_available():
+ raise ImportError("Datasets needs to be installed if not passing speaker embeddings.")
+
+ embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
+ speaker_embeddings = torch.tensor(embeddings_dataset[7305]["xvector"]).unsqueeze(0)
+
+ return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}
+
+ def forward(self, inputs):
+ with torch.no_grad():
+ return self.model.generate_speech(**inputs)
+
+ def decode(self, outputs):
+ with torch.no_grad():
+ return self.post_processor(outputs).cpu().detach()
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/tools/translation.py b/llmeval-env/lib/python3.10/site-packages/transformers/tools/translation.py
new file mode 100644
index 0000000000000000000000000000000000000000..50a164d5bd6f4f7b647374484bd20c95e74c5dc9
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/tools/translation.py
@@ -0,0 +1,271 @@
+#!/usr/bin/env python
+# coding=utf-8
+
+# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
+from .base import PipelineTool
+
+
+LANGUAGE_CODES = {
+ "Acehnese Arabic": "ace_Arab",
+ "Acehnese Latin": "ace_Latn",
+ "Mesopotamian Arabic": "acm_Arab",
+ "Ta'izzi-Adeni Arabic": "acq_Arab",
+ "Tunisian Arabic": "aeb_Arab",
+ "Afrikaans": "afr_Latn",
+ "South Levantine Arabic": "ajp_Arab",
+ "Akan": "aka_Latn",
+ "Amharic": "amh_Ethi",
+ "North Levantine Arabic": "apc_Arab",
+ "Modern Standard Arabic": "arb_Arab",
+ "Modern Standard Arabic Romanized": "arb_Latn",
+ "Najdi Arabic": "ars_Arab",
+ "Moroccan Arabic": "ary_Arab",
+ "Egyptian Arabic": "arz_Arab",
+ "Assamese": "asm_Beng",
+ "Asturian": "ast_Latn",
+ "Awadhi": "awa_Deva",
+ "Central Aymara": "ayr_Latn",
+ "South Azerbaijani": "azb_Arab",
+ "North Azerbaijani": "azj_Latn",
+ "Bashkir": "bak_Cyrl",
+ "Bambara": "bam_Latn",
+ "Balinese": "ban_Latn",
+ "Belarusian": "bel_Cyrl",
+ "Bemba": "bem_Latn",
+ "Bengali": "ben_Beng",
+ "Bhojpuri": "bho_Deva",
+ "Banjar Arabic": "bjn_Arab",
+ "Banjar Latin": "bjn_Latn",
+ "Standard Tibetan": "bod_Tibt",
+ "Bosnian": "bos_Latn",
+ "Buginese": "bug_Latn",
+ "Bulgarian": "bul_Cyrl",
+ "Catalan": "cat_Latn",
+ "Cebuano": "ceb_Latn",
+ "Czech": "ces_Latn",
+ "Chokwe": "cjk_Latn",
+ "Central Kurdish": "ckb_Arab",
+ "Crimean Tatar": "crh_Latn",
+ "Welsh": "cym_Latn",
+ "Danish": "dan_Latn",
+ "German": "deu_Latn",
+ "Southwestern Dinka": "dik_Latn",
+ "Dyula": "dyu_Latn",
+ "Dzongkha": "dzo_Tibt",
+ "Greek": "ell_Grek",
+ "English": "eng_Latn",
+ "Esperanto": "epo_Latn",
+ "Estonian": "est_Latn",
+ "Basque": "eus_Latn",
+ "Ewe": "ewe_Latn",
+ "Faroese": "fao_Latn",
+ "Fijian": "fij_Latn",
+ "Finnish": "fin_Latn",
+ "Fon": "fon_Latn",
+ "French": "fra_Latn",
+ "Friulian": "fur_Latn",
+ "Nigerian Fulfulde": "fuv_Latn",
+ "Scottish Gaelic": "gla_Latn",
+ "Irish": "gle_Latn",
+ "Galician": "glg_Latn",
+ "Guarani": "grn_Latn",
+ "Gujarati": "guj_Gujr",
+ "Haitian Creole": "hat_Latn",
+ "Hausa": "hau_Latn",
+ "Hebrew": "heb_Hebr",
+ "Hindi": "hin_Deva",
+ "Chhattisgarhi": "hne_Deva",
+ "Croatian": "hrv_Latn",
+ "Hungarian": "hun_Latn",
+ "Armenian": "hye_Armn",
+ "Igbo": "ibo_Latn",
+ "Ilocano": "ilo_Latn",
+ "Indonesian": "ind_Latn",
+ "Icelandic": "isl_Latn",
+ "Italian": "ita_Latn",
+ "Javanese": "jav_Latn",
+ "Japanese": "jpn_Jpan",
+ "Kabyle": "kab_Latn",
+ "Jingpho": "kac_Latn",
+ "Kamba": "kam_Latn",
+ "Kannada": "kan_Knda",
+ "Kashmiri Arabic": "kas_Arab",
+ "Kashmiri Devanagari": "kas_Deva",
+ "Georgian": "kat_Geor",
+ "Central Kanuri Arabic": "knc_Arab",
+ "Central Kanuri Latin": "knc_Latn",
+ "Kazakh": "kaz_Cyrl",
+ "Kabiyè": "kbp_Latn",
+ "Kabuverdianu": "kea_Latn",
+ "Khmer": "khm_Khmr",
+ "Kikuyu": "kik_Latn",
+ "Kinyarwanda": "kin_Latn",
+ "Kyrgyz": "kir_Cyrl",
+ "Kimbundu": "kmb_Latn",
+ "Northern Kurdish": "kmr_Latn",
+ "Kikongo": "kon_Latn",
+ "Korean": "kor_Hang",
+ "Lao": "lao_Laoo",
+ "Ligurian": "lij_Latn",
+ "Limburgish": "lim_Latn",
+ "Lingala": "lin_Latn",
+ "Lithuanian": "lit_Latn",
+ "Lombard": "lmo_Latn",
+ "Latgalian": "ltg_Latn",
+ "Luxembourgish": "ltz_Latn",
+ "Luba-Kasai": "lua_Latn",
+ "Ganda": "lug_Latn",
+ "Luo": "luo_Latn",
+ "Mizo": "lus_Latn",
+ "Standard Latvian": "lvs_Latn",
+ "Magahi": "mag_Deva",
+ "Maithili": "mai_Deva",
+ "Malayalam": "mal_Mlym",
+ "Marathi": "mar_Deva",
+ "Minangkabau Arabic ": "min_Arab",
+ "Minangkabau Latin": "min_Latn",
+ "Macedonian": "mkd_Cyrl",
+ "Plateau Malagasy": "plt_Latn",
+ "Maltese": "mlt_Latn",
+ "Meitei Bengali": "mni_Beng",
+ "Halh Mongolian": "khk_Cyrl",
+ "Mossi": "mos_Latn",
+ "Maori": "mri_Latn",
+ "Burmese": "mya_Mymr",
+ "Dutch": "nld_Latn",
+ "Norwegian Nynorsk": "nno_Latn",
+ "Norwegian Bokmål": "nob_Latn",
+ "Nepali": "npi_Deva",
+ "Northern Sotho": "nso_Latn",
+ "Nuer": "nus_Latn",
+ "Nyanja": "nya_Latn",
+ "Occitan": "oci_Latn",
+ "West Central Oromo": "gaz_Latn",
+ "Odia": "ory_Orya",
+ "Pangasinan": "pag_Latn",
+ "Eastern Panjabi": "pan_Guru",
+ "Papiamento": "pap_Latn",
+ "Western Persian": "pes_Arab",
+ "Polish": "pol_Latn",
+ "Portuguese": "por_Latn",
+ "Dari": "prs_Arab",
+ "Southern Pashto": "pbt_Arab",
+ "Ayacucho Quechua": "quy_Latn",
+ "Romanian": "ron_Latn",
+ "Rundi": "run_Latn",
+ "Russian": "rus_Cyrl",
+ "Sango": "sag_Latn",
+ "Sanskrit": "san_Deva",
+ "Santali": "sat_Olck",
+ "Sicilian": "scn_Latn",
+ "Shan": "shn_Mymr",
+ "Sinhala": "sin_Sinh",
+ "Slovak": "slk_Latn",
+ "Slovenian": "slv_Latn",
+ "Samoan": "smo_Latn",
+ "Shona": "sna_Latn",
+ "Sindhi": "snd_Arab",
+ "Somali": "som_Latn",
+ "Southern Sotho": "sot_Latn",
+ "Spanish": "spa_Latn",
+ "Tosk Albanian": "als_Latn",
+ "Sardinian": "srd_Latn",
+ "Serbian": "srp_Cyrl",
+ "Swati": "ssw_Latn",
+ "Sundanese": "sun_Latn",
+ "Swedish": "swe_Latn",
+ "Swahili": "swh_Latn",
+ "Silesian": "szl_Latn",
+ "Tamil": "tam_Taml",
+ "Tatar": "tat_Cyrl",
+ "Telugu": "tel_Telu",
+ "Tajik": "tgk_Cyrl",
+ "Tagalog": "tgl_Latn",
+ "Thai": "tha_Thai",
+ "Tigrinya": "tir_Ethi",
+ "Tamasheq Latin": "taq_Latn",
+ "Tamasheq Tifinagh": "taq_Tfng",
+ "Tok Pisin": "tpi_Latn",
+ "Tswana": "tsn_Latn",
+ "Tsonga": "tso_Latn",
+ "Turkmen": "tuk_Latn",
+ "Tumbuka": "tum_Latn",
+ "Turkish": "tur_Latn",
+ "Twi": "twi_Latn",
+ "Central Atlas Tamazight": "tzm_Tfng",
+ "Uyghur": "uig_Arab",
+ "Ukrainian": "ukr_Cyrl",
+ "Umbundu": "umb_Latn",
+ "Urdu": "urd_Arab",
+ "Northern Uzbek": "uzn_Latn",
+ "Venetian": "vec_Latn",
+ "Vietnamese": "vie_Latn",
+ "Waray": "war_Latn",
+ "Wolof": "wol_Latn",
+ "Xhosa": "xho_Latn",
+ "Eastern Yiddish": "ydd_Hebr",
+ "Yoruba": "yor_Latn",
+ "Yue Chinese": "yue_Hant",
+ "Chinese Simplified": "zho_Hans",
+ "Chinese Traditional": "zho_Hant",
+ "Standard Malay": "zsm_Latn",
+ "Zulu": "zul_Latn",
+}
+
+
+class TranslationTool(PipelineTool):
+ """
+ Example:
+
+ ```py
+ from transformers.tools import TranslationTool
+
+ translator = TranslationTool()
+ translator("This is a super nice API!", src_lang="English", tgt_lang="French")
+ ```
+ """
+
+ default_checkpoint = "facebook/nllb-200-distilled-600M"
+ description = (
+ "This is a tool that translates text from a language to another. It takes three inputs: `text`, which should "
+ "be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, "
+ "which should be the language for the desired ouput language. Both `src_lang` and `tgt_lang` are written in "
+ "plain English, such as 'Romanian', or 'Albanian'. It returns the text translated in `tgt_lang`."
+ )
+ name = "translator"
+ pre_processor_class = AutoTokenizer
+ model_class = AutoModelForSeq2SeqLM
+ lang_to_code = LANGUAGE_CODES
+
+ inputs = ["text", "text", "text"]
+ outputs = ["text"]
+
+ def encode(self, text, src_lang, tgt_lang):
+ if src_lang not in self.lang_to_code:
+ raise ValueError(f"{src_lang} is not a supported language.")
+ if tgt_lang not in self.lang_to_code:
+ raise ValueError(f"{tgt_lang} is not a supported language.")
+ src_lang = self.lang_to_code[src_lang]
+ tgt_lang = self.lang_to_code[tgt_lang]
+ return self.pre_processor._build_translation_inputs(
+ text, return_tensors="pt", src_lang=src_lang, tgt_lang=tgt_lang
+ )
+
+ def forward(self, inputs):
+ return self.model.generate(**inputs)
+
+ def decode(self, outputs):
+ return self.post_processor.decode(outputs[0].tolist(), skip_special_tokens=True)