diff --git a/ckpts/universal/global_step20/zero/6.attention.query_key_value.weight/fp32.pt b/ckpts/universal/global_step20/zero/6.attention.query_key_value.weight/fp32.pt
new file mode 100644
index 0000000000000000000000000000000000000000..4cf3c41f795f684502dfeea450a74d43b39fec11
--- /dev/null
+++ b/ckpts/universal/global_step20/zero/6.attention.query_key_value.weight/fp32.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bfa5af5f97e2261795962c3795a71294432a85c195b619cab660207907a6dfb6
+size 50332749
diff --git a/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_abstract_algebra.yaml b/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_abstract_algebra.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..8609f626a25d5f37d41ab8a312dfe226e44dbbd8
--- /dev/null
+++ b/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_abstract_algebra.yaml
@@ -0,0 +1,6 @@
+"dataset_name": "abstract_algebra"
+"description": "The following are multiple choice questions (with answers) about abstract\
+ \ algebra.\n\n"
+"group": "mmlu_flan_cot_zeroshot_stem"
+"include": "_mmlu_flan_cot_zeroshot_template_yaml"
+"task": "mmlu_flan_cot_zeroshot_abstract_algebra"
diff --git a/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_astronomy.yaml b/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_astronomy.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..e5ffd8ffe302442af98c246e9d7bac54c063d81f
--- /dev/null
+++ b/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_astronomy.yaml
@@ -0,0 +1,6 @@
+"dataset_name": "astronomy"
+"description": "The following are multiple choice questions (with answers) about astronomy.\n\
+ \n"
+"group": "mmlu_flan_cot_zeroshot_stem"
+"include": "_mmlu_flan_cot_zeroshot_template_yaml"
+"task": "mmlu_flan_cot_zeroshot_astronomy"
diff --git a/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_business_ethics.yaml b/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_business_ethics.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..a64285711f2f23775ddc37431b5c39f5a589f9ec
--- /dev/null
+++ b/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_business_ethics.yaml
@@ -0,0 +1,6 @@
+"dataset_name": "business_ethics"
+"description": "The following are multiple choice questions (with answers) about business\
+ \ ethics.\n\n"
+"group": "mmlu_flan_cot_zeroshot_other"
+"include": "_mmlu_flan_cot_zeroshot_template_yaml"
+"task": "mmlu_flan_cot_zeroshot_business_ethics"
diff --git a/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_computer_security.yaml b/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_computer_security.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..ae4bda965ef4dd839b400959b391a71f1fcddcd3
--- /dev/null
+++ b/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_computer_security.yaml
@@ -0,0 +1,6 @@
+"dataset_name": "computer_security"
+"description": "The following are multiple choice questions (with answers) about computer\
+ \ security.\n\n"
+"group": "mmlu_flan_cot_zeroshot_stem"
+"include": "_mmlu_flan_cot_zeroshot_template_yaml"
+"task": "mmlu_flan_cot_zeroshot_computer_security"
diff --git a/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_conceptual_physics.yaml b/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_conceptual_physics.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..2e1e43dbad9432de41c580779108843761280313
--- /dev/null
+++ b/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_conceptual_physics.yaml
@@ -0,0 +1,6 @@
+"dataset_name": "conceptual_physics"
+"description": "The following are multiple choice questions (with answers) about conceptual\
+ \ physics.\n\n"
+"group": "mmlu_flan_cot_zeroshot_stem"
+"include": "_mmlu_flan_cot_zeroshot_template_yaml"
+"task": "mmlu_flan_cot_zeroshot_conceptual_physics"
diff --git a/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_econometrics.yaml b/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_econometrics.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..9ff25bba4657133ff33a491c641589aed6476114
--- /dev/null
+++ b/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_econometrics.yaml
@@ -0,0 +1,6 @@
+"dataset_name": "econometrics"
+"description": "The following are multiple choice questions (with answers) about econometrics.\n\
+ \n"
+"group": "mmlu_flan_cot_zeroshot_social_sciences"
+"include": "_mmlu_flan_cot_zeroshot_template_yaml"
+"task": "mmlu_flan_cot_zeroshot_econometrics"
diff --git a/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_electrical_engineering.yaml b/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_electrical_engineering.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..ca10a43e910d6fe090af53ffaf90e645e1ad69a1
--- /dev/null
+++ b/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_electrical_engineering.yaml
@@ -0,0 +1,6 @@
+"dataset_name": "electrical_engineering"
+"description": "The following are multiple choice questions (with answers) about electrical\
+ \ engineering.\n\n"
+"group": "mmlu_flan_cot_zeroshot_stem"
+"include": "_mmlu_flan_cot_zeroshot_template_yaml"
+"task": "mmlu_flan_cot_zeroshot_electrical_engineering"
diff --git a/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_formal_logic.yaml b/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_formal_logic.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..ec2d323cae468b5efd9739929a0822dfb853e233
--- /dev/null
+++ b/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_formal_logic.yaml
@@ -0,0 +1,6 @@
+"dataset_name": "formal_logic"
+"description": "The following are multiple choice questions (with answers) about formal\
+ \ logic.\n\n"
+"group": "mmlu_flan_cot_zeroshot_humanities"
+"include": "_mmlu_flan_cot_zeroshot_template_yaml"
+"task": "mmlu_flan_cot_zeroshot_formal_logic"
diff --git a/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_biology.yaml b/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_biology.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..0e5794db64588edef17d3c396f96ef870383cfa3
--- /dev/null
+++ b/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_biology.yaml
@@ -0,0 +1,6 @@
+"dataset_name": "high_school_biology"
+"description": "The following are multiple choice questions (with answers) about high\
+ \ school biology.\n\n"
+"group": "mmlu_flan_cot_zeroshot_stem"
+"include": "_mmlu_flan_cot_zeroshot_template_yaml"
+"task": "mmlu_flan_cot_zeroshot_high_school_biology"
diff --git a/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_chemistry.yaml b/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_chemistry.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..eba398b0393383621f3d688ea5356409eb56b215
--- /dev/null
+++ b/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_chemistry.yaml
@@ -0,0 +1,6 @@
+"dataset_name": "high_school_chemistry"
+"description": "The following are multiple choice questions (with answers) about high\
+ \ school chemistry.\n\n"
+"group": "mmlu_flan_cot_zeroshot_stem"
+"include": "_mmlu_flan_cot_zeroshot_template_yaml"
+"task": "mmlu_flan_cot_zeroshot_high_school_chemistry"
diff --git a/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_european_history.yaml b/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_european_history.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..54eafb51d385f7afd35a78d2ed8098565d1c5297
--- /dev/null
+++ b/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_european_history.yaml
@@ -0,0 +1,6 @@
+"dataset_name": "high_school_european_history"
+"description": "The following are multiple choice questions (with answers) about high\
+ \ school european history.\n\n"
+"group": "mmlu_flan_cot_zeroshot_humanities"
+"include": "_mmlu_flan_cot_zeroshot_template_yaml"
+"task": "mmlu_flan_cot_zeroshot_high_school_european_history"
diff --git a/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_geography.yaml b/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_geography.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..0898c87664e5250530d6998337c8fc601e1b876d
--- /dev/null
+++ b/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_geography.yaml
@@ -0,0 +1,6 @@
+"dataset_name": "high_school_geography"
+"description": "The following are multiple choice questions (with answers) about high\
+ \ school geography.\n\n"
+"group": "mmlu_flan_cot_zeroshot_social_sciences"
+"include": "_mmlu_flan_cot_zeroshot_template_yaml"
+"task": "mmlu_flan_cot_zeroshot_high_school_geography"
diff --git a/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_mathematics.yaml b/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_mathematics.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..dff0960afbb669c59b6159b45a5a474110c0d770
--- /dev/null
+++ b/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_mathematics.yaml
@@ -0,0 +1,6 @@
+"dataset_name": "high_school_mathematics"
+"description": "The following are multiple choice questions (with answers) about high\
+ \ school mathematics.\n\n"
+"group": "mmlu_flan_cot_zeroshot_stem"
+"include": "_mmlu_flan_cot_zeroshot_template_yaml"
+"task": "mmlu_flan_cot_zeroshot_high_school_mathematics"
diff --git a/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_psychology.yaml b/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_psychology.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..d5d477233122391157ebe6ce3b817902c5a39712
--- /dev/null
+++ b/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_psychology.yaml
@@ -0,0 +1,6 @@
+"dataset_name": "high_school_psychology"
+"description": "The following are multiple choice questions (with answers) about high\
+ \ school psychology.\n\n"
+"group": "mmlu_flan_cot_zeroshot_social_sciences"
+"include": "_mmlu_flan_cot_zeroshot_template_yaml"
+"task": "mmlu_flan_cot_zeroshot_high_school_psychology"
diff --git a/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_world_history.yaml b/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_world_history.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..c89dd0faa47730d507b7337abbf38e00879389b5
--- /dev/null
+++ b/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_world_history.yaml
@@ -0,0 +1,6 @@
+"dataset_name": "high_school_world_history"
+"description": "The following are multiple choice questions (with answers) about high\
+ \ school world history.\n\n"
+"group": "mmlu_flan_cot_zeroshot_humanities"
+"include": "_mmlu_flan_cot_zeroshot_template_yaml"
+"task": "mmlu_flan_cot_zeroshot_high_school_world_history"
diff --git a/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_international_law.yaml b/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_international_law.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..d777e9fc81262b0ff745fb2f9c82376c669c15df
--- /dev/null
+++ b/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_international_law.yaml
@@ -0,0 +1,6 @@
+"dataset_name": "international_law"
+"description": "The following are multiple choice questions (with answers) about international\
+ \ law.\n\n"
+"group": "mmlu_flan_cot_zeroshot_humanities"
+"include": "_mmlu_flan_cot_zeroshot_template_yaml"
+"task": "mmlu_flan_cot_zeroshot_international_law"
diff --git a/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_management.yaml b/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_management.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..b7164c1cfcb00b9359809173da72dad15383143c
--- /dev/null
+++ b/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_management.yaml
@@ -0,0 +1,6 @@
+"dataset_name": "management"
+"description": "The following are multiple choice questions (with answers) about management.\n\
+ \n"
+"group": "mmlu_flan_cot_zeroshot_other"
+"include": "_mmlu_flan_cot_zeroshot_template_yaml"
+"task": "mmlu_flan_cot_zeroshot_management"
diff --git a/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_moral_disputes.yaml b/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_moral_disputes.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..a4595f06991b1096e49325c897fbe6f0b3eea6c2
--- /dev/null
+++ b/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_moral_disputes.yaml
@@ -0,0 +1,6 @@
+"dataset_name": "moral_disputes"
+"description": "The following are multiple choice questions (with answers) about moral\
+ \ disputes.\n\n"
+"group": "mmlu_flan_cot_zeroshot_humanities"
+"include": "_mmlu_flan_cot_zeroshot_template_yaml"
+"task": "mmlu_flan_cot_zeroshot_moral_disputes"
diff --git a/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_philosophy.yaml b/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_philosophy.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..534707cb2b08c1605f0bfeeabcbe8ab0bd372038
--- /dev/null
+++ b/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_philosophy.yaml
@@ -0,0 +1,6 @@
+"dataset_name": "philosophy"
+"description": "The following are multiple choice questions (with answers) about philosophy.\n\
+ \n"
+"group": "mmlu_flan_cot_zeroshot_humanities"
+"include": "_mmlu_flan_cot_zeroshot_template_yaml"
+"task": "mmlu_flan_cot_zeroshot_philosophy"
diff --git a/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_professional_psychology.yaml b/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_professional_psychology.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..cc055d5bacacf77b1dd5f70b68bbaa81d1aad2ff
--- /dev/null
+++ b/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_professional_psychology.yaml
@@ -0,0 +1,6 @@
+"dataset_name": "professional_psychology"
+"description": "The following are multiple choice questions (with answers) about professional\
+ \ psychology.\n\n"
+"group": "mmlu_flan_cot_zeroshot_social_sciences"
+"include": "_mmlu_flan_cot_zeroshot_template_yaml"
+"task": "mmlu_flan_cot_zeroshot_professional_psychology"
diff --git a/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_public_relations.yaml b/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_public_relations.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..14d02c3a3c015e78cb780c646461fb7ac70a5ce4
--- /dev/null
+++ b/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_public_relations.yaml
@@ -0,0 +1,6 @@
+"dataset_name": "public_relations"
+"description": "The following are multiple choice questions (with answers) about public\
+ \ relations.\n\n"
+"group": "mmlu_flan_cot_zeroshot_social_sciences"
+"include": "_mmlu_flan_cot_zeroshot_template_yaml"
+"task": "mmlu_flan_cot_zeroshot_public_relations"
diff --git a/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_sociology.yaml b/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_sociology.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..45b94193c55ac43e7ee6dc33462e128748a68c21
--- /dev/null
+++ b/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_sociology.yaml
@@ -0,0 +1,6 @@
+"dataset_name": "sociology"
+"description": "The following are multiple choice questions (with answers) about sociology.\n\
+ \n"
+"group": "mmlu_flan_cot_zeroshot_social_sciences"
+"include": "_mmlu_flan_cot_zeroshot_template_yaml"
+"task": "mmlu_flan_cot_zeroshot_sociology"
diff --git a/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/utils.py b/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..72246935de8cf0cf8b256fd1e6c87dfbbb90a2ad
--- /dev/null
+++ b/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_cot_zeroshot/utils.py
@@ -0,0 +1,121 @@
+import re
+import sys
+import unicodedata
+
+from lm_eval.filters.extraction import RegexFilter
+
+
+class MultiChoiceRegexFilter(RegexFilter):
+ """ """
+    """A RegexFilter that falls back to matching the answer-choice text itself when the primary pattern fails."""
+ def __init__(
+ self,
+ regex_pattern: str = r"#### (\-?[0-9\.\,]+)",
+ group_select=0,
+ fallback: str = "[invalid]",
+ ignore_case=False,
+ ignore_punctuation=False,
+ regexes_to_ignore=None,
+ ) -> None:
+        r"""
+        regex_pattern: The basic regex pattern to use. If it fails to match, we fall back to a customized match procedure:
+                        - step 1 : we map each choice text to ([A-Z]) and try to find the choice texts in the response.
+                        - step 2 : we parse the choice with the regex :[\s]*([A-?]), where ? varies with the number of choices.
+ group_select: Selects the (group_select)th match from the findall result.
+ ignore_case: Ignores the case during step 1 matching
+ ignore_punctuation: Remove the punctuation during step 1 matching
+ regexes_to_ignore: Remove these regexes during step 1 matching
+ """
+ super().__init__(regex_pattern, group_select, fallback)
+ self.ignore_case = ignore_case
+ self.ignore_punctuation = ignore_punctuation
+ self.regexes_to_ignore = regexes_to_ignore
+
+ def apply(self, resps, docs):
+        # Here, we assume we have a list in which each element is
+        # a list of model responses for some particular input/target pair,
+        # so we process each of these (same input/target) response sets
+        # independently (and keep them as a list).
+
+ def find_match(regex, resp, convert_dict={}):
+ match = regex.findall(resp)
+ if match:
+ match = match[self.group_select]
+ if isinstance(match, tuple):
+ match = [m for m in match if m][0]
+ match = match.strip()
+ if match and match in convert_dict:
+ match = convert_dict[match]
+ return match
+
+ punct_tbl = dict.fromkeys(
+ i
+ for i in range(sys.maxunicode)
+ if unicodedata.category(chr(i)).startswith("P")
+ )
+
+ def filter_ignores(st):
+ if self.regexes_to_ignore is not None:
+ for s in self.regexes_to_ignore:
+ st = re.sub(s, "", st)
+
+ if self.ignore_case:
+ st = st.lower()
+
+ if self.ignore_punctuation:
+ # https://stackoverflow.com/a/266162
+ st = st.translate(punct_tbl)
+ return st
+
+ filtered_resps = []
+
+ for r, doc in zip(resps, docs):
+ fallback_regexes = []
+ choice_to_alpha = {}
+ next_alpha = "A"
+
+ without_paren_fallback_regexes = []
+ without_paren_to_target = {}
+
+ choices = doc["choices"]
+ for c in choices:
+ m = filter_ignores(c.strip())
+                fallback_regexes.append(re.escape(m))
+ choice_to_alpha[m] = f"({next_alpha})"
+
+ without_paren_fallback_regexes.append(next_alpha)
+ without_paren_to_target[next_alpha] = f"({next_alpha})"
+
+ next_alpha = chr(ord(next_alpha) + 1)
+ fallback_regex = re.compile("|".join(fallback_regexes))
+ without_paren_fallback_regex = "|".join(without_paren_fallback_regexes)
+ without_paren_fallback_regex = re.compile(
+            rf":[\s]*({without_paren_fallback_regex})"
+ )
+
+ filtered = []
+ for resp in r:
+ match = find_match(self.regex, resp)
+ if not match:
+ match = find_match(
+ fallback_regex, filter_ignores(resp), choice_to_alpha
+ )
+ if not match:
+ match = find_match(
+ without_paren_fallback_regex, resp, without_paren_to_target
+ )
+ if not match:
+ match = self.fallback
+ filtered.append(match)
+ filtered_resps.append(filtered)
+
+ return filtered_resps
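+
+
+# Minimal usage sketch (illustrative only; the response strings and the doc
+# dict below are made up, not taken from a real eval run):
+#
+#     f = MultiChoiceRegexFilter(regex_pattern=r"answer is \(([A-D])\)")
+#     docs = [{"choices": ["Paris", "London", "Rome", "Berlin"]}]
+#     f.apply([["The answer is (A)."]], docs)        # -> [["A"]]
+#     f.apply([["I would pick Paris here."]], docs)  # -> [["(A)"]] via fallback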
diff --git a/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_astronomy.yaml b/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_astronomy.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..5f71dbcfa10f829b6514d652933b2cc94eb77bd2
--- /dev/null
+++ b/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_astronomy.yaml
@@ -0,0 +1,6 @@
+"dataset_name": "astronomy"
+"description": "The following are multiple choice questions (with answers) about astronomy.\n\
+ \n"
+"group": "mmlu_flan_n_shot_loglikelihood_stem"
+"include": "_mmlu_flan_loglikelihood_template_yaml"
+"task": "mmlu_flan_n_shot_loglikelihood_astronomy"
diff --git a/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_business_ethics.yaml b/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_business_ethics.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..54dc204d2431face1dcd41235b8bb3679dff3496
--- /dev/null
+++ b/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_business_ethics.yaml
@@ -0,0 +1,6 @@
+"dataset_name": "business_ethics"
+"description": "The following are multiple choice questions (with answers) about business\
+ \ ethics.\n\n"
+"group": "mmlu_flan_n_shot_loglikelihood_other"
+"include": "_mmlu_flan_loglikelihood_template_yaml"
+"task": "mmlu_flan_n_shot_loglikelihood_business_ethics"
diff --git a/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_clinical_knowledge.yaml b/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_clinical_knowledge.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..121b3c22efeebfcb3fe06839f8fec1d43a6a9831
--- /dev/null
+++ b/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_clinical_knowledge.yaml
@@ -0,0 +1,6 @@
+"dataset_name": "clinical_knowledge"
+"description": "The following are multiple choice questions (with answers) about clinical\
+ \ knowledge.\n\n"
+"group": "mmlu_flan_n_shot_loglikelihood_other"
+"include": "_mmlu_flan_loglikelihood_template_yaml"
+"task": "mmlu_flan_n_shot_loglikelihood_clinical_knowledge"
diff --git a/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_college_computer_science.yaml b/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_college_computer_science.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..e4bdbdd67206016451228bbfa7b318279f9b43dc
--- /dev/null
+++ b/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_college_computer_science.yaml
@@ -0,0 +1,6 @@
+"dataset_name": "college_computer_science"
+"description": "The following are multiple choice questions (with answers) about college\
+ \ computer science.\n\n"
+"group": "mmlu_flan_n_shot_loglikelihood_stem"
+"include": "_mmlu_flan_loglikelihood_template_yaml"
+"task": "mmlu_flan_n_shot_loglikelihood_college_computer_science"
diff --git a/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_college_medicine.yaml b/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_college_medicine.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..c45a6c9c138cbe906aa7a9207f472b1b92d8522d
--- /dev/null
+++ b/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_college_medicine.yaml
@@ -0,0 +1,6 @@
+"dataset_name": "college_medicine"
+"description": "The following are multiple choice questions (with answers) about college\
+ \ medicine.\n\n"
+"group": "mmlu_flan_n_shot_loglikelihood_other"
+"include": "_mmlu_flan_loglikelihood_template_yaml"
+"task": "mmlu_flan_n_shot_loglikelihood_college_medicine"
diff --git a/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_college_physics.yaml b/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_college_physics.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..d325f9796f9826f38dfae5c796d58cffef18ad2e
--- /dev/null
+++ b/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_college_physics.yaml
@@ -0,0 +1,6 @@
+"dataset_name": "college_physics"
+"description": "The following are multiple choice questions (with answers) about college\
+ \ physics.\n\n"
+"group": "mmlu_flan_n_shot_loglikelihood_stem"
+"include": "_mmlu_flan_loglikelihood_template_yaml"
+"task": "mmlu_flan_n_shot_loglikelihood_college_physics"
diff --git a/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_computer_security.yaml b/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_computer_security.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..5b0a75ff1e2981e4741a2dde05f0663cd10aea1d
--- /dev/null
+++ b/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_computer_security.yaml
@@ -0,0 +1,6 @@
+"dataset_name": "computer_security"
+"description": "The following are multiple choice questions (with answers) about computer\
+ \ security.\n\n"
+"group": "mmlu_flan_n_shot_loglikelihood_stem"
+"include": "_mmlu_flan_loglikelihood_template_yaml"
+"task": "mmlu_flan_n_shot_loglikelihood_computer_security"
diff --git a/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_econometrics.yaml b/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_econometrics.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..146d4847d816e546c0ec817cd2e236f453d6bae1
--- /dev/null
+++ b/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_econometrics.yaml
@@ -0,0 +1,6 @@
+"dataset_name": "econometrics"
+"description": "The following are multiple choice questions (with answers) about econometrics.\n\
+ \n"
+"group": "mmlu_flan_n_shot_loglikelihood_social_sciences"
+"include": "_mmlu_flan_loglikelihood_template_yaml"
+"task": "mmlu_flan_n_shot_loglikelihood_econometrics"
diff --git a/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_electrical_engineering.yaml b/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_electrical_engineering.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..61cb27e22855e169d30f7fdcd71a18f42afc1ce3
--- /dev/null
+++ b/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_electrical_engineering.yaml
@@ -0,0 +1,6 @@
+"dataset_name": "electrical_engineering"
+"description": "The following are multiple choice questions (with answers) about electrical\
+ \ engineering.\n\n"
+"group": "mmlu_flan_n_shot_loglikelihood_stem"
+"include": "_mmlu_flan_loglikelihood_template_yaml"
+"task": "mmlu_flan_n_shot_loglikelihood_electrical_engineering"
diff --git a/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_elementary_mathematics.yaml b/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_elementary_mathematics.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..39e10f856c003cc0c382a088278c2a44ee0ad92a
--- /dev/null
+++ b/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_elementary_mathematics.yaml
@@ -0,0 +1,6 @@
+"dataset_name": "elementary_mathematics"
+"description": "The following are multiple choice questions (with answers) about elementary\
+ \ mathematics.\n\n"
+"group": "mmlu_flan_n_shot_loglikelihood_stem"
+"include": "_mmlu_flan_loglikelihood_template_yaml"
+"task": "mmlu_flan_n_shot_loglikelihood_elementary_mathematics"
diff --git a/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_formal_logic.yaml b/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_formal_logic.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..7fb8aa923735d58bc22d32f93f556cfe54cd66af
--- /dev/null
+++ b/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_formal_logic.yaml
@@ -0,0 +1,6 @@
+"dataset_name": "formal_logic"
+"description": "The following are multiple choice questions (with answers) about formal\
+ \ logic.\n\n"
+"group": "mmlu_flan_n_shot_loglikelihood_humanities"
+"include": "_mmlu_flan_loglikelihood_template_yaml"
+"task": "mmlu_flan_n_shot_loglikelihood_formal_logic"
diff --git a/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_global_facts.yaml b/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_global_facts.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..5ffc9069ac1e7ee42eada962ee7c7a5146b05be6
--- /dev/null
+++ b/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_global_facts.yaml
@@ -0,0 +1,6 @@
+"dataset_name": "global_facts"
+"description": "The following are multiple choice questions (with answers) about global\
+ \ facts.\n\n"
+"group": "mmlu_flan_n_shot_loglikelihood_other"
+"include": "_mmlu_flan_loglikelihood_template_yaml"
+"task": "mmlu_flan_n_shot_loglikelihood_global_facts"
diff --git a/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_high_school_biology.yaml b/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_high_school_biology.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..328b47f8bc141d6f465f38ebd634c1dece5d0269
--- /dev/null
+++ b/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_high_school_biology.yaml
@@ -0,0 +1,6 @@
+"dataset_name": "high_school_biology"
+"description": "The following are multiple choice questions (with answers) about high\
+ \ school biology.\n\n"
+"group": "mmlu_flan_n_shot_loglikelihood_stem"
+"include": "_mmlu_flan_loglikelihood_template_yaml"
+"task": "mmlu_flan_n_shot_loglikelihood_high_school_biology"
diff --git a/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_high_school_chemistry.yaml b/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_high_school_chemistry.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..350583752e02786d1f21eccb8118ca9c8f0e1af8
--- /dev/null
+++ b/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_high_school_chemistry.yaml
@@ -0,0 +1,6 @@
+"dataset_name": "high_school_chemistry"
+"description": "The following are multiple choice questions (with answers) about high\
+ \ school chemistry.\n\n"
+"group": "mmlu_flan_n_shot_loglikelihood_stem"
+"include": "_mmlu_flan_loglikelihood_template_yaml"
+"task": "mmlu_flan_n_shot_loglikelihood_high_school_chemistry"
diff --git a/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_high_school_computer_science.yaml b/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_high_school_computer_science.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..cd2e1285a9b53895c766e23ea33a859d8fa81218
--- /dev/null
+++ b/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_high_school_computer_science.yaml
@@ -0,0 +1,6 @@
+"dataset_name": "high_school_computer_science"
+"description": "The following are multiple choice questions (with answers) about high\
+ \ school computer science.\n\n"
+"group": "mmlu_flan_n_shot_loglikelihood_stem"
+"include": "_mmlu_flan_loglikelihood_template_yaml"
+"task": "mmlu_flan_n_shot_loglikelihood_high_school_computer_science"
diff --git a/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_high_school_geography.yaml b/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_high_school_geography.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..c2e8d83f450497a1c2abf1dbc520ce44e31ff199
--- /dev/null
+++ b/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_high_school_geography.yaml
@@ -0,0 +1,6 @@
+"dataset_name": "high_school_geography"
+"description": "The following are multiple choice questions (with answers) about high\
+ \ school geography.\n\n"
+"group": "mmlu_flan_n_shot_loglikelihood_social_sciences"
+"include": "_mmlu_flan_loglikelihood_template_yaml"
+"task": "mmlu_flan_n_shot_loglikelihood_high_school_geography"
diff --git a/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_high_school_government_and_politics.yaml b/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_high_school_government_and_politics.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..9b72fb19b7148583e017f51d6df5e66bb45eeb53
--- /dev/null
+++ b/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_high_school_government_and_politics.yaml
@@ -0,0 +1,6 @@
+"dataset_name": "high_school_government_and_politics"
+"description": "The following are multiple choice questions (with answers) about high\
+ \ school government and politics.\n\n"
+"group": "mmlu_flan_n_shot_loglikelihood_social_sciences"
+"include": "_mmlu_flan_loglikelihood_template_yaml"
+"task": "mmlu_flan_n_shot_loglikelihood_high_school_government_and_politics"
diff --git a/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_high_school_mathematics.yaml b/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_high_school_mathematics.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..1ddd6df3de4895cc83be706567ab92b10351c053
--- /dev/null
+++ b/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_high_school_mathematics.yaml
@@ -0,0 +1,6 @@
+"dataset_name": "high_school_mathematics"
+"description": "The following are multiple choice questions (with answers) about high\
+ \ school mathematics.\n\n"
+"group": "mmlu_flan_n_shot_loglikelihood_stem"
+"include": "_mmlu_flan_loglikelihood_template_yaml"
+"task": "mmlu_flan_n_shot_loglikelihood_high_school_mathematics"
diff --git a/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_high_school_world_history.yaml b/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_high_school_world_history.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..8664736255b9f7d55945cb40fd285a49b6a626f3
--- /dev/null
+++ b/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_high_school_world_history.yaml
@@ -0,0 +1,6 @@
+"dataset_name": "high_school_world_history"
+"description": "The following are multiple choice questions (with answers) about high\
+ \ school world history.\n\n"
+"group": "mmlu_flan_n_shot_loglikelihood_humanities"
+"include": "_mmlu_flan_loglikelihood_template_yaml"
+"task": "mmlu_flan_n_shot_loglikelihood_high_school_world_history"
diff --git a/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_human_aging.yaml b/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_human_aging.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..9d0a0179e675716b33feb140c268b4926ac6b46d
--- /dev/null
+++ b/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_human_aging.yaml
@@ -0,0 +1,6 @@
+"dataset_name": "human_aging"
+"description": "The following are multiple choice questions (with answers) about human\
+ \ aging.\n\n"
+"group": "mmlu_flan_n_shot_loglikelihood_other"
+"include": "_mmlu_flan_loglikelihood_template_yaml"
+"task": "mmlu_flan_n_shot_loglikelihood_human_aging"
diff --git a/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_jurisprudence.yaml b/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_jurisprudence.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..0ef1cb184e345dae777028b123717f38ac3c8c63
--- /dev/null
+++ b/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_jurisprudence.yaml
@@ -0,0 +1,6 @@
+"dataset_name": "jurisprudence"
+"description": "The following are multiple choice questions (with answers) about jurisprudence.\n\
+ \n"
+"group": "mmlu_flan_n_shot_loglikelihood_humanities"
+"include": "_mmlu_flan_loglikelihood_template_yaml"
+"task": "mmlu_flan_n_shot_loglikelihood_jurisprudence"
diff --git a/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_machine_learning.yaml b/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_machine_learning.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..fccc7058b5b5598dbe7efebf9f04c484bc071388
--- /dev/null
+++ b/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_machine_learning.yaml
@@ -0,0 +1,6 @@
+"dataset_name": "machine_learning"
+"description": "The following are multiple choice questions (with answers) about machine\
+ \ learning.\n\n"
+"group": "mmlu_flan_n_shot_loglikelihood_stem"
+"include": "_mmlu_flan_loglikelihood_template_yaml"
+"task": "mmlu_flan_n_shot_loglikelihood_machine_learning"
diff --git a/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_marketing.yaml b/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_marketing.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..3537a86b933669b1f2cce6362184eb6bf61988e7
--- /dev/null
+++ b/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_marketing.yaml
@@ -0,0 +1,6 @@
+"dataset_name": "marketing"
+"description": "The following are multiple choice questions (with answers) about marketing.\n\
+ \n"
+"group": "mmlu_flan_n_shot_loglikelihood_other"
+"include": "_mmlu_flan_loglikelihood_template_yaml"
+"task": "mmlu_flan_n_shot_loglikelihood_marketing"
diff --git a/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_medical_genetics.yaml b/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_medical_genetics.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..49247525eadd9ca9e8368d8f2d0606a97d837e0d
--- /dev/null
+++ b/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_medical_genetics.yaml
@@ -0,0 +1,6 @@
+"dataset_name": "medical_genetics"
+"description": "The following are multiple choice questions (with answers) about medical\
+ \ genetics.\n\n"
+"group": "mmlu_flan_n_shot_loglikelihood_other"
+"include": "_mmlu_flan_loglikelihood_template_yaml"
+"task": "mmlu_flan_n_shot_loglikelihood_medical_genetics"
diff --git a/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_moral_disputes.yaml b/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_moral_disputes.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..4ff46f425aed8740eb3fe349fd5afebb6b079a06
--- /dev/null
+++ b/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_moral_disputes.yaml
@@ -0,0 +1,6 @@
+"dataset_name": "moral_disputes"
+"description": "The following are multiple choice questions (with answers) about moral\
+ \ disputes.\n\n"
+"group": "mmlu_flan_n_shot_loglikelihood_humanities"
+"include": "_mmlu_flan_loglikelihood_template_yaml"
+"task": "mmlu_flan_n_shot_loglikelihood_moral_disputes"
diff --git a/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_philosophy.yaml b/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_philosophy.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..944b44a14477a42d7662075521a6edeafa778685
--- /dev/null
+++ b/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_philosophy.yaml
@@ -0,0 +1,6 @@
+"dataset_name": "philosophy"
+"description": "The following are multiple choice questions (with answers) about philosophy.\n\
+ \n"
+"group": "mmlu_flan_n_shot_loglikelihood_humanities"
+"include": "_mmlu_flan_loglikelihood_template_yaml"
+"task": "mmlu_flan_n_shot_loglikelihood_philosophy"
diff --git a/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_sociology.yaml b/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_sociology.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..630d16925febdb8ef943ea5d7b5faa649f487a9d
--- /dev/null
+++ b/lm-evaluation-harness/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_sociology.yaml
@@ -0,0 +1,6 @@
+"dataset_name": "sociology"
+"description": "The following are multiple choice questions (with answers) about sociology.\n\
+ \n"
+"group": "mmlu_flan_n_shot_loglikelihood_social_sciences"
+"include": "_mmlu_flan_loglikelihood_template_yaml"
+"task": "mmlu_flan_n_shot_loglikelihood_sociology"
diff --git a/venv/lib/python3.10/site-packages/transformers/models/deberta_v2/__init__.py b/venv/lib/python3.10/site-packages/transformers/models/deberta_v2/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..fb1b20a331fe11dfa687c7550685de296ebafbe0
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/deberta_v2/__init__.py
@@ -0,0 +1,127 @@
+# Copyright 2020 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import TYPE_CHECKING
+
+from ...utils import (
+ OptionalDependencyNotAvailable,
+ _LazyModule,
+ is_tf_available,
+ is_tokenizers_available,
+ is_torch_available,
+)
+
+
+_import_structure = {
+ "configuration_deberta_v2": ["DEBERTA_V2_PRETRAINED_CONFIG_ARCHIVE_MAP", "DebertaV2Config", "DebertaV2OnnxConfig"],
+ "tokenization_deberta_v2": ["DebertaV2Tokenizer"],
+}
+
+try:
+ if not is_tokenizers_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["tokenization_deberta_v2_fast"] = ["DebertaV2TokenizerFast"]
+
+try:
+ if not is_tf_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_tf_deberta_v2"] = [
+ "TF_DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "TFDebertaV2ForMaskedLM",
+ "TFDebertaV2ForQuestionAnswering",
+ "TFDebertaV2ForMultipleChoice",
+ "TFDebertaV2ForSequenceClassification",
+ "TFDebertaV2ForTokenClassification",
+ "TFDebertaV2Model",
+ "TFDebertaV2PreTrainedModel",
+ ]
+
+try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_deberta_v2"] = [
+ "DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "DebertaV2ForMaskedLM",
+ "DebertaV2ForMultipleChoice",
+ "DebertaV2ForQuestionAnswering",
+ "DebertaV2ForSequenceClassification",
+ "DebertaV2ForTokenClassification",
+ "DebertaV2Model",
+ "DebertaV2PreTrainedModel",
+ ]
+
+
+if TYPE_CHECKING:
+ from .configuration_deberta_v2 import (
+ DEBERTA_V2_PRETRAINED_CONFIG_ARCHIVE_MAP,
+ DebertaV2Config,
+ DebertaV2OnnxConfig,
+ )
+ from .tokenization_deberta_v2 import DebertaV2Tokenizer
+
+ try:
+ if not is_tokenizers_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .tokenization_deberta_v2_fast import DebertaV2TokenizerFast
+
+ try:
+ if not is_tf_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_tf_deberta_v2 import (
+ TF_DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
+ TFDebertaV2ForMaskedLM,
+ TFDebertaV2ForMultipleChoice,
+ TFDebertaV2ForQuestionAnswering,
+ TFDebertaV2ForSequenceClassification,
+ TFDebertaV2ForTokenClassification,
+ TFDebertaV2Model,
+ TFDebertaV2PreTrainedModel,
+ )
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_deberta_v2 import (
+ DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
+ DebertaV2ForMaskedLM,
+ DebertaV2ForMultipleChoice,
+ DebertaV2ForQuestionAnswering,
+ DebertaV2ForSequenceClassification,
+ DebertaV2ForTokenClassification,
+ DebertaV2Model,
+ DebertaV2PreTrainedModel,
+ )
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
diff --git a/venv/lib/python3.10/site-packages/transformers/models/deberta_v2/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/deberta_v2/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..fface98738a054b13b4674fed290dd92f2bf8fa1
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/deberta_v2/__pycache__/__init__.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/deberta_v2/__pycache__/configuration_deberta_v2.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/deberta_v2/__pycache__/configuration_deberta_v2.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2be8a21a17e8bdc31771a31d45902f8df3125944
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/deberta_v2/__pycache__/configuration_deberta_v2.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/deberta_v2/__pycache__/modeling_deberta_v2.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/deberta_v2/__pycache__/modeling_deberta_v2.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6710dcd82db25fe5abeca172edffd293b8671e28
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/deberta_v2/__pycache__/modeling_deberta_v2.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/deberta_v2/__pycache__/modeling_tf_deberta_v2.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/deberta_v2/__pycache__/modeling_tf_deberta_v2.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..56b48ee10a2239a6742cb6b5a869943af078fd0a
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/deberta_v2/__pycache__/modeling_tf_deberta_v2.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/deberta_v2/__pycache__/tokenization_deberta_v2.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/deberta_v2/__pycache__/tokenization_deberta_v2.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..496a84a207bc0e709c1fa6d95c79a19b91abc30e
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/deberta_v2/__pycache__/tokenization_deberta_v2.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/deberta_v2/__pycache__/tokenization_deberta_v2_fast.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/deberta_v2/__pycache__/tokenization_deberta_v2_fast.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d13b10ec2118cf482b7296044c04bf9473515242
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/deberta_v2/__pycache__/tokenization_deberta_v2_fast.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/deberta_v2/configuration_deberta_v2.py b/venv/lib/python3.10/site-packages/transformers/models/deberta_v2/configuration_deberta_v2.py
new file mode 100644
index 0000000000000000000000000000000000000000..25348849e2f240e2e284d501282b9cf9266b8b00
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/deberta_v2/configuration_deberta_v2.py
@@ -0,0 +1,207 @@
+# coding=utf-8
+# Copyright 2020, Microsoft and the HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" DeBERTa-v2 model configuration"""
+from collections import OrderedDict
+from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
+
+from ...configuration_utils import PretrainedConfig
+from ...onnx import OnnxConfig
+from ...utils import logging
+
+
+if TYPE_CHECKING:
+ from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
+
+
+logger = logging.get_logger(__name__)
+
+
+from ..deprecated._archive_maps import DEBERTA_V2_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
+
+
+class DebertaV2Config(PretrainedConfig):
+ r"""
+ This is the configuration class to store the configuration of a [`DebertaV2Model`]. It is used to instantiate a
+ DeBERTa-v2 model according to the specified arguments, defining the model architecture. Instantiating a
+ configuration with the defaults will yield a similar configuration to that of the DeBERTa
+ [microsoft/deberta-v2-xlarge](https://huggingface.co/microsoft/deberta-v2-xlarge) architecture.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+ Arguments:
+ vocab_size (`int`, *optional*, defaults to 128100):
+ Vocabulary size of the DeBERTa-v2 model. Defines the number of different tokens that can be represented by
+            the `input_ids` passed when calling [`DebertaV2Model`].
+ hidden_size (`int`, *optional*, defaults to 1536):
+ Dimensionality of the encoder layers and the pooler layer.
+ num_hidden_layers (`int`, *optional*, defaults to 24):
+ Number of hidden layers in the Transformer encoder.
+ num_attention_heads (`int`, *optional*, defaults to 24):
+ Number of attention heads for each attention layer in the Transformer encoder.
+ intermediate_size (`int`, *optional*, defaults to 6144):
+ Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
+ hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
+            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+            `"relu"`, `"silu"`, `"tanh"`, `"gelu_fast"`, `"mish"`, `"linear"`, `"sigmoid"` and `"gelu_new"`
+            are supported.
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
+ The dropout ratio for the attention probabilities.
+ max_position_embeddings (`int`, *optional*, defaults to 512):
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
+ just in case (e.g., 512 or 1024 or 2048).
+ type_vocab_size (`int`, *optional*, defaults to 0):
+            The vocabulary size of the `token_type_ids` passed when calling [`DebertaV2Model`] or [`TFDebertaV2Model`].
+ initializer_range (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+ layer_norm_eps (`float`, *optional*, defaults to 1e-7):
+ The epsilon used by the layer normalization layers.
+        relative_attention (`bool`, *optional*, defaults to `False`):
+            Whether to use relative position encoding.
+ max_relative_positions (`int`, *optional*, defaults to -1):
+ The range of relative positions `[-max_position_embeddings, max_position_embeddings]`. Use the same value
+ as `max_position_embeddings`.
+ pad_token_id (`int`, *optional*, defaults to 0):
+ The value used to pad input_ids.
+ position_biased_input (`bool`, *optional*, defaults to `True`):
+            Whether to add absolute position embeddings to the content embeddings.
+        pos_att_type (`List[str]`, *optional*):
+            The type of relative position attention, which can be a combination of `["p2c", "c2p"]`, e.g. `["p2c"]`,
+            `["c2p"]`, `["p2c", "c2p"]`.
+
+ Example:
+
+ ```python
+ >>> from transformers import DebertaV2Config, DebertaV2Model
+
+ >>> # Initializing a DeBERTa-v2 microsoft/deberta-v2-xlarge style configuration
+ >>> configuration = DebertaV2Config()
+
+ >>> # Initializing a model (with random weights) from the microsoft/deberta-v2-xlarge style configuration
+ >>> model = DebertaV2Model(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```"""
+
+ model_type = "deberta-v2"
+
+ def __init__(
+ self,
+ vocab_size=128100,
+ hidden_size=1536,
+ num_hidden_layers=24,
+ num_attention_heads=24,
+ intermediate_size=6144,
+ hidden_act="gelu",
+ hidden_dropout_prob=0.1,
+ attention_probs_dropout_prob=0.1,
+ max_position_embeddings=512,
+ type_vocab_size=0,
+ initializer_range=0.02,
+ layer_norm_eps=1e-7,
+ relative_attention=False,
+ max_relative_positions=-1,
+ pad_token_id=0,
+ position_biased_input=True,
+ pos_att_type=None,
+ pooler_dropout=0,
+ pooler_hidden_act="gelu",
+ **kwargs,
+ ):
+ super().__init__(**kwargs)
+
+ self.hidden_size = hidden_size
+ self.num_hidden_layers = num_hidden_layers
+ self.num_attention_heads = num_attention_heads
+ self.intermediate_size = intermediate_size
+ self.hidden_act = hidden_act
+ self.hidden_dropout_prob = hidden_dropout_prob
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
+ self.max_position_embeddings = max_position_embeddings
+ self.type_vocab_size = type_vocab_size
+ self.initializer_range = initializer_range
+ self.relative_attention = relative_attention
+ self.max_relative_positions = max_relative_positions
+ self.pad_token_id = pad_token_id
+ self.position_biased_input = position_biased_input
+
+ # Backwards compatibility
+ if isinstance(pos_att_type, str):
+ pos_att_type = [x.strip() for x in pos_att_type.lower().split("|")]
+
+ self.pos_att_type = pos_att_type
+ self.vocab_size = vocab_size
+ self.layer_norm_eps = layer_norm_eps
+
+ self.pooler_hidden_size = kwargs.get("pooler_hidden_size", hidden_size)
+ self.pooler_dropout = pooler_dropout
+ self.pooler_hidden_act = pooler_hidden_act
+
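+# Backwards-compatibility sketch (illustrative): a legacy `pos_att_type` string
+# such as "p2c|c2p" is normalized by `__init__` above into a list:
+#
+#     cfg = DebertaV2Config(pos_att_type="p2c|c2p")
+#     cfg.pos_att_type  # -> ["p2c", "c2p"]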
+
+class DebertaV2OnnxConfig(OnnxConfig):
+ @property
+ def inputs(self) -> Mapping[str, Mapping[int, str]]:
+ if self.task == "multiple-choice":
+ dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
+ else:
+ dynamic_axis = {0: "batch", 1: "sequence"}
+ if self._config.type_vocab_size > 0:
+ return OrderedDict(
+ [("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)]
+ )
+ else:
+ return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)])
+
+ @property
+ def default_onnx_opset(self) -> int:
+ return 12
+
+ def generate_dummy_inputs(
+ self,
+ preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
+ batch_size: int = -1,
+ seq_length: int = -1,
+ num_choices: int = -1,
+ is_pair: bool = False,
+ framework: Optional["TensorType"] = None,
+ num_channels: int = 3,
+ image_width: int = 40,
+ image_height: int = 40,
+ tokenizer: "PreTrainedTokenizerBase" = None,
+ ) -> Mapping[str, Any]:
+ dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor, framework=framework)
+ if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
+ del dummy_inputs["token_type_ids"]
+ return dummy_inputs
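+
+
+# Minimal export-prep sketch (illustrative; assumes the `transformers` ONNX
+# extras are installed and that the tokenizer named below can be downloaded):
+#
+#     from transformers import AutoTokenizer
+#
+#     config = DebertaV2Config()
+#     onnx_config = DebertaV2OnnxConfig(config)
+#     tokenizer = AutoTokenizer.from_pretrained("microsoft/deberta-v2-xlarge")
+#     dummy = onnx_config.generate_dummy_inputs(tokenizer)
+#     # "token_type_ids" is absent because the default `type_vocab_size` is 0.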
diff --git a/venv/lib/python3.10/site-packages/transformers/models/deberta_v2/modeling_deberta_v2.py b/venv/lib/python3.10/site-packages/transformers/models/deberta_v2/modeling_deberta_v2.py
new file mode 100644
index 0000000000000000000000000000000000000000..dfe18b0d4964af94a2790c0d76b56ed3caacaeff
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/deberta_v2/modeling_deberta_v2.py
@@ -0,0 +1,1629 @@
+# coding=utf-8
+# Copyright 2020 Microsoft and the Hugging Face Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" PyTorch DeBERTa-v2 model."""
+
+from collections.abc import Sequence
+from typing import Optional, Tuple, Union
+
+import torch
+import torch.utils.checkpoint
+from torch import nn
+from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, LayerNorm, MSELoss
+
+from ...activations import ACT2FN
+from ...modeling_outputs import (
+ BaseModelOutput,
+ MaskedLMOutput,
+ MultipleChoiceModelOutput,
+ QuestionAnsweringModelOutput,
+ SequenceClassifierOutput,
+ TokenClassifierOutput,
+)
+from ...modeling_utils import PreTrainedModel
+from ...pytorch_utils import softmax_backward_data
+from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
+from .configuration_deberta_v2 import DebertaV2Config
+
+
+logger = logging.get_logger(__name__)
+
+_CONFIG_FOR_DOC = "DebertaV2Config"
+_CHECKPOINT_FOR_DOC = "microsoft/deberta-v2-xlarge"
+_QA_TARGET_START_INDEX = 2
+_QA_TARGET_END_INDEX = 9
+
+
+from ..deprecated._archive_maps import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
+
+
+# Copied from transformers.models.deberta.modeling_deberta.ContextPooler
+class ContextPooler(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.pooler_hidden_size, config.pooler_hidden_size)
+ self.dropout = StableDropout(config.pooler_dropout)
+ self.config = config
+
+ def forward(self, hidden_states):
+ # We "pool" the model by simply taking the hidden state corresponding
+ # to the first token.
+
+ context_token = hidden_states[:, 0]
+ context_token = self.dropout(context_token)
+ pooled_output = self.dense(context_token)
+ pooled_output = ACT2FN[self.config.pooler_hidden_act](pooled_output)
+ return pooled_output
+
+ @property
+ def output_dim(self):
+ return self.config.hidden_size
+
+
+# Copied from transformers.models.deberta.modeling_deberta.XSoftmax with deberta->deberta_v2
+class XSoftmax(torch.autograd.Function):
+ """
+ Masked Softmax which is optimized for saving memory
+
+ Args:
+        input (`torch.tensor`): The input tensor to which softmax will be applied.
+        mask (`torch.IntTensor`):
+            The mask matrix, where 0 indicates that the element will be ignored in the softmax calculation.
+        dim (int): The dimension along which softmax will be applied.
+
+ Example:
+
+ ```python
+ >>> import torch
+ >>> from transformers.models.deberta_v2.modeling_deberta_v2 import XSoftmax
+
+ >>> # Make a tensor
+ >>> x = torch.randn([4, 20, 100])
+
+ >>> # Create a mask
+ >>> mask = (x > 0).int()
+
+ >>> # Specify the dimension to apply softmax
+ >>> dim = -1
+
+ >>> y = XSoftmax.apply(x, mask, dim)
+ ```"""
+
+ @staticmethod
+ def forward(self, input, mask, dim):
+ self.dim = dim
+ rmask = ~(mask.to(torch.bool))
+
+ output = input.masked_fill(rmask, torch.tensor(torch.finfo(input.dtype).min))
+ output = torch.softmax(output, self.dim)
+ output.masked_fill_(rmask, 0)
+ self.save_for_backward(output)
+ return output
+
+ @staticmethod
+ def backward(self, grad_output):
+ (output,) = self.saved_tensors
+ inputGrad = softmax_backward_data(self, grad_output, output, self.dim, output)
+ return inputGrad, None, None
+
+ @staticmethod
+ def symbolic(g, self, mask, dim):
+ import torch.onnx.symbolic_helper as sym_help
+ from torch.onnx.symbolic_opset9 import masked_fill, softmax
+
+ mask_cast_value = g.op("Cast", mask, to_i=sym_help.cast_pytorch_to_onnx["Long"])
+ r_mask = g.op(
+ "Cast",
+ g.op("Sub", g.op("Constant", value_t=torch.tensor(1, dtype=torch.int64)), mask_cast_value),
+ to_i=sym_help.cast_pytorch_to_onnx["Bool"],
+ )
+ output = masked_fill(
+ g, self, r_mask, g.op("Constant", value_t=torch.tensor(torch.finfo(self.type().dtype()).min))
+ )
+ output = softmax(g, output, dim)
+ return masked_fill(g, output, r_mask, g.op("Constant", value_t=torch.tensor(0, dtype=torch.bool)))
+
+
+# Copied from transformers.models.deberta.modeling_deberta.DropoutContext
+class DropoutContext(object):
+ def __init__(self):
+ self.dropout = 0
+ self.mask = None
+ self.scale = 1
+ self.reuse_mask = True
+
+
+# Copied from transformers.models.deberta.modeling_deberta.get_mask
+def get_mask(input, local_context):
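+    # `local_context` is either a plain dropout probability (float) or a DropoutContext
+    # carrying a cached mask, a scale factor, and a reuse flag.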
+ if not isinstance(local_context, DropoutContext):
+ dropout = local_context
+ mask = None
+ else:
+ dropout = local_context.dropout
+ dropout *= local_context.scale
+ mask = local_context.mask if local_context.reuse_mask else None
+
+ if dropout > 0 and mask is None:
+ mask = (1 - torch.empty_like(input).bernoulli_(1 - dropout)).to(torch.bool)
+
+ if isinstance(local_context, DropoutContext):
+ if local_context.mask is None:
+ local_context.mask = mask
+
+ return mask, dropout
+
+
+# Copied from transformers.models.deberta.modeling_deberta.XDropout
+class XDropout(torch.autograd.Function):
+ """Optimized dropout function to save computation and memory by using mask operation instead of multiplication."""
+
+ @staticmethod
+ def forward(ctx, input, local_ctx):
+ mask, dropout = get_mask(input, local_ctx)
+ ctx.scale = 1.0 / (1 - dropout)
+ if dropout > 0:
+ ctx.save_for_backward(mask)
+ return input.masked_fill(mask, 0) * ctx.scale
+ else:
+ return input
+
+ @staticmethod
+ def backward(ctx, grad_output):
+ if ctx.scale > 1:
+ (mask,) = ctx.saved_tensors
+ return grad_output.masked_fill(mask, 0) * ctx.scale, None
+ else:
+ return grad_output, None
+
+ @staticmethod
+ def symbolic(g: torch._C.Graph, input: torch._C.Value, local_ctx: Union[float, DropoutContext]) -> torch._C.Value:
+ from torch.onnx import symbolic_opset12
+
+ dropout_p = local_ctx
+ if isinstance(local_ctx, DropoutContext):
+ dropout_p = local_ctx.dropout
+ # StableDropout only calls this function when training.
+ train = True
+ # TODO: We should check if the opset_version being used to export
+ # is > 12 here, but there's no good way to do that. As-is, if the
+ # opset_version < 12, export will fail with a CheckerError.
+ # Once https://github.com/pytorch/pytorch/issues/78391 is fixed, do something like:
+ # if opset_version < 12:
+ # return torch.onnx.symbolic_opset9.dropout(g, input, dropout_p, train)
+ return symbolic_opset12.dropout(g, input, dropout_p, train)
+
+
+# Copied from transformers.models.deberta.modeling_deberta.StableDropout
+class StableDropout(nn.Module):
+ """
+    Optimized dropout module for stabilizing training
+
+    Args:
+        drop_prob (float): the dropout probability
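+
+    Example (a minimal sketch of mask reuse, assuming training mode):
+
+    ```python
+    >>> import torch
+    >>> x = torch.randn(2, 4)
+    >>> drop = StableDropout(0.1)  # nn.Module defaults to training mode
+    >>> drop.init_context(reuse_mask=True)
+    >>> y1 = drop(x)  # samples a dropout mask and caches it
+    >>> drop.init_context(reuse_mask=True)  # rewind so the cached mask is reused
+    >>> y2 = drop(x)  # identical to y1
+    ```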
+ """
+
+ def __init__(self, drop_prob):
+ super().__init__()
+ self.drop_prob = drop_prob
+ self.count = 0
+ self.context_stack = None
+
+ def forward(self, x):
+ """
+ Call the module
+
+ Args:
+            x (`torch.tensor`): The input tensor to apply dropout to
+ """
+ if self.training and self.drop_prob > 0:
+ return XDropout.apply(x, self.get_context())
+ return x
+
+ def clear_context(self):
+ self.count = 0
+ self.context_stack = None
+
+ def init_context(self, reuse_mask=True, scale=1):
+ if self.context_stack is None:
+ self.context_stack = []
+ self.count = 0
+ for c in self.context_stack:
+ c.reuse_mask = reuse_mask
+ c.scale = scale
+
+ def get_context(self):
+ if self.context_stack is not None:
+ if self.count >= len(self.context_stack):
+ self.context_stack.append(DropoutContext())
+ ctx = self.context_stack[self.count]
+ ctx.dropout = self.drop_prob
+ self.count += 1
+ return ctx
+ else:
+ return self.drop_prob
+
+
+# Copied from transformers.models.deberta.modeling_deberta.DebertaSelfOutput with DebertaLayerNorm->LayerNorm
+class DebertaV2SelfOutput(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
+ self.LayerNorm = LayerNorm(config.hidden_size, config.layer_norm_eps)
+ self.dropout = StableDropout(config.hidden_dropout_prob)
+
+ def forward(self, hidden_states, input_tensor):
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
+ return hidden_states
+
+
+# Copied from transformers.models.deberta.modeling_deberta.DebertaAttention with Deberta->DebertaV2
+class DebertaV2Attention(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.self = DisentangledSelfAttention(config)
+ self.output = DebertaV2SelfOutput(config)
+ self.config = config
+
+ def forward(
+ self,
+ hidden_states,
+ attention_mask,
+ output_attentions=False,
+ query_states=None,
+ relative_pos=None,
+ rel_embeddings=None,
+ ):
+ self_output = self.self(
+ hidden_states,
+ attention_mask,
+ output_attentions,
+ query_states=query_states,
+ relative_pos=relative_pos,
+ rel_embeddings=rel_embeddings,
+ )
+ if output_attentions:
+ self_output, att_matrix = self_output
+ if query_states is None:
+ query_states = hidden_states
+ attention_output = self.output(self_output, query_states)
+
+ if output_attentions:
+ return (attention_output, att_matrix)
+ else:
+ return attention_output
+
+
+# Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->DebertaV2
+class DebertaV2Intermediate(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
+ if isinstance(config.hidden_act, str):
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
+ else:
+ self.intermediate_act_fn = config.hidden_act
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.intermediate_act_fn(hidden_states)
+ return hidden_states
+
+
+# Copied from transformers.models.deberta.modeling_deberta.DebertaOutput with DebertaLayerNorm->LayerNorm
+class DebertaV2Output(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
+ self.LayerNorm = LayerNorm(config.hidden_size, config.layer_norm_eps)
+ self.dropout = StableDropout(config.hidden_dropout_prob)
+ self.config = config
+
+ def forward(self, hidden_states, input_tensor):
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
+ return hidden_states
+
+
+# Copied from transformers.models.deberta.modeling_deberta.DebertaLayer with Deberta->DebertaV2
+class DebertaV2Layer(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.attention = DebertaV2Attention(config)
+ self.intermediate = DebertaV2Intermediate(config)
+ self.output = DebertaV2Output(config)
+
+ def forward(
+ self,
+ hidden_states,
+ attention_mask,
+ query_states=None,
+ relative_pos=None,
+ rel_embeddings=None,
+ output_attentions=False,
+ ):
+ attention_output = self.attention(
+ hidden_states,
+ attention_mask,
+ output_attentions=output_attentions,
+ query_states=query_states,
+ relative_pos=relative_pos,
+ rel_embeddings=rel_embeddings,
+ )
+ if output_attentions:
+ attention_output, att_matrix = attention_output
+ intermediate_output = self.intermediate(attention_output)
+ layer_output = self.output(intermediate_output, attention_output)
+ if output_attentions:
+ return (layer_output, att_matrix)
+ else:
+ return layer_output
+
+
+class ConvLayer(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ kernel_size = getattr(config, "conv_kernel_size", 3)
+ groups = getattr(config, "conv_groups", 1)
+ self.conv_act = getattr(config, "conv_act", "tanh")
+ self.conv = nn.Conv1d(
+ config.hidden_size, config.hidden_size, kernel_size, padding=(kernel_size - 1) // 2, groups=groups
+ )
+ self.LayerNorm = LayerNorm(config.hidden_size, config.layer_norm_eps)
+ self.dropout = StableDropout(config.hidden_dropout_prob)
+ self.config = config
+
+ def forward(self, hidden_states, residual_states, input_mask):
+ out = self.conv(hidden_states.permute(0, 2, 1).contiguous()).permute(0, 2, 1).contiguous()
+ rmask = (1 - input_mask).bool()
+ out.masked_fill_(rmask.unsqueeze(-1).expand(out.size()), 0)
+ out = ACT2FN[self.conv_act](self.dropout(out))
+
+ layer_norm_input = residual_states + out
+ output = self.LayerNorm(layer_norm_input).to(layer_norm_input)
+
+ if input_mask is None:
+ output_states = output
+ else:
+ if input_mask.dim() != layer_norm_input.dim():
+ if input_mask.dim() == 4:
+ input_mask = input_mask.squeeze(1).squeeze(1)
+ input_mask = input_mask.unsqueeze(2)
+
+ input_mask = input_mask.to(output.dtype)
+ output_states = output * input_mask
+
+ return output_states
+
+
+class DebertaV2Encoder(nn.Module):
+ """Modified BertEncoder with relative position bias support"""
+
+ def __init__(self, config):
+ super().__init__()
+
+ self.layer = nn.ModuleList([DebertaV2Layer(config) for _ in range(config.num_hidden_layers)])
+ self.relative_attention = getattr(config, "relative_attention", False)
+
+ if self.relative_attention:
+ self.max_relative_positions = getattr(config, "max_relative_positions", -1)
+ if self.max_relative_positions < 1:
+ self.max_relative_positions = config.max_position_embeddings
+
+ self.position_buckets = getattr(config, "position_buckets", -1)
+ pos_ebd_size = self.max_relative_positions * 2
+
+ if self.position_buckets > 0:
+ pos_ebd_size = self.position_buckets * 2
+
+ self.rel_embeddings = nn.Embedding(pos_ebd_size, config.hidden_size)
+
+ self.norm_rel_ebd = [x.strip() for x in getattr(config, "norm_rel_ebd", "none").lower().split("|")]
+
+ if "layer_norm" in self.norm_rel_ebd:
+ self.LayerNorm = LayerNorm(config.hidden_size, config.layer_norm_eps, elementwise_affine=True)
+
+ self.conv = ConvLayer(config) if getattr(config, "conv_kernel_size", 0) > 0 else None
+ self.gradient_checkpointing = False
+
+ def get_rel_embedding(self):
+ rel_embeddings = self.rel_embeddings.weight if self.relative_attention else None
+ if rel_embeddings is not None and ("layer_norm" in self.norm_rel_ebd):
+ rel_embeddings = self.LayerNorm(rel_embeddings)
+ return rel_embeddings
+
+ def get_attention_mask(self, attention_mask):
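+        # Expand a [batch, seq] padding mask into [batch, 1, seq, seq], where entry
+        # [i, j] is 1 only if both token i and token j are real (non-padding) tokens.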
+ if attention_mask.dim() <= 2:
+ extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
+ attention_mask = extended_attention_mask * extended_attention_mask.squeeze(-2).unsqueeze(-1)
+ elif attention_mask.dim() == 3:
+ attention_mask = attention_mask.unsqueeze(1)
+
+ return attention_mask
+
+ def get_rel_pos(self, hidden_states, query_states=None, relative_pos=None):
+ if self.relative_attention and relative_pos is None:
+ q = query_states.size(-2) if query_states is not None else hidden_states.size(-2)
+ relative_pos = build_relative_position(
+ q,
+ hidden_states.size(-2),
+ bucket_size=self.position_buckets,
+ max_position=self.max_relative_positions,
+ device=hidden_states.device,
+ )
+ return relative_pos
+
+ def forward(
+ self,
+ hidden_states,
+ attention_mask,
+ output_hidden_states=True,
+ output_attentions=False,
+ query_states=None,
+ relative_pos=None,
+ return_dict=True,
+ ):
+ if attention_mask.dim() <= 2:
+ input_mask = attention_mask
+ else:
+ input_mask = attention_mask.sum(-2) > 0
+ attention_mask = self.get_attention_mask(attention_mask)
+ relative_pos = self.get_rel_pos(hidden_states, query_states, relative_pos)
+
+ all_hidden_states = () if output_hidden_states else None
+ all_attentions = () if output_attentions else None
+
+ if isinstance(hidden_states, Sequence):
+ next_kv = hidden_states[0]
+ else:
+ next_kv = hidden_states
+ rel_embeddings = self.get_rel_embedding()
+ output_states = next_kv
+ for i, layer_module in enumerate(self.layer):
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (output_states,)
+
+ if self.gradient_checkpointing and self.training:
+ output_states = self._gradient_checkpointing_func(
+ layer_module.__call__,
+ next_kv,
+ attention_mask,
+ query_states,
+ relative_pos,
+ rel_embeddings,
+ output_attentions,
+ )
+ else:
+ output_states = layer_module(
+ next_kv,
+ attention_mask,
+ query_states=query_states,
+ relative_pos=relative_pos,
+ rel_embeddings=rel_embeddings,
+ output_attentions=output_attentions,
+ )
+
+ if output_attentions:
+ output_states, att_m = output_states
+
+ if i == 0 and self.conv is not None:
+ output_states = self.conv(hidden_states, output_states, input_mask)
+
+ if query_states is not None:
+ query_states = output_states
+ if isinstance(hidden_states, Sequence):
+ next_kv = hidden_states[i + 1] if i + 1 < len(self.layer) else None
+ else:
+ next_kv = output_states
+
+ if output_attentions:
+ all_attentions = all_attentions + (att_m,)
+
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (output_states,)
+
+ if not return_dict:
+ return tuple(v for v in [output_states, all_hidden_states, all_attentions] if v is not None)
+ return BaseModelOutput(
+ last_hidden_state=output_states, hidden_states=all_hidden_states, attentions=all_attentions
+ )
+
+
+def make_log_bucket_position(relative_pos, bucket_size, max_position):
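+    # A rough worked example (bucket_size=8, max_position=64): offsets with |pos| <= 4
+    # map to themselves, while larger offsets are log-compressed, e.g. an offset of 20
+    # maps to bucket 6 and the maximum offset of 63 maps to bucket 7.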
+ sign = torch.sign(relative_pos)
+ mid = bucket_size // 2
+ abs_pos = torch.where(
+ (relative_pos < mid) & (relative_pos > -mid),
+ torch.tensor(mid - 1).type_as(relative_pos),
+ torch.abs(relative_pos),
+ )
+ log_pos = (
+ torch.ceil(torch.log(abs_pos / mid) / torch.log(torch.tensor((max_position - 1) / mid)) * (mid - 1)) + mid
+ )
+ bucket_pos = torch.where(abs_pos <= mid, relative_pos.type_as(log_pos), log_pos * sign)
+ return bucket_pos
+
+
+def build_relative_position(query_size, key_size, bucket_size=-1, max_position=-1, device=None):
+ """
+ Build relative position according to the query and key
+
+    We assume the absolute position of the query \\(P_q\\) ranges over (0, query_size) and the absolute position of
+    the key \\(P_k\\) ranges over (0, key_size). The relative position from query to key is \\(R_{q \\rightarrow k} =
+    P_q - P_k\\).
+
+ Args:
+ query_size (int): the length of query
+ key_size (int): the length of key
+ bucket_size (int): the size of position bucket
+ max_position (int): the maximum allowed absolute position
+ device (`torch.device`): the device on which tensors will be created.
+
+ Return:
+ `torch.LongTensor`: A tensor with shape [1, query_size, key_size]
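+
+    Example (illustrative, with bucketing disabled):
+
+    ```python
+    >>> build_relative_position(3, 3)
+    tensor([[[ 0, -1, -2],
+             [ 1,  0, -1],
+             [ 2,  1,  0]]])
+    ```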
+ """
+
+ q_ids = torch.arange(0, query_size, device=device)
+ k_ids = torch.arange(0, key_size, device=device)
+ rel_pos_ids = q_ids[:, None] - k_ids[None, :]
+ if bucket_size > 0 and max_position > 0:
+ rel_pos_ids = make_log_bucket_position(rel_pos_ids, bucket_size, max_position)
+ rel_pos_ids = rel_pos_ids.to(torch.long)
+ rel_pos_ids = rel_pos_ids[:query_size, :]
+ rel_pos_ids = rel_pos_ids.unsqueeze(0)
+ return rel_pos_ids
+
+
+@torch.jit.script
+# Copied from transformers.models.deberta.modeling_deberta.c2p_dynamic_expand
+def c2p_dynamic_expand(c2p_pos, query_layer, relative_pos):
+ return c2p_pos.expand([query_layer.size(0), query_layer.size(1), query_layer.size(2), relative_pos.size(-1)])
+
+
+@torch.jit.script
+# Copied from transformers.models.deberta.modeling_deberta.p2c_dynamic_expand
+def p2c_dynamic_expand(c2p_pos, query_layer, key_layer):
+ return c2p_pos.expand([query_layer.size(0), query_layer.size(1), key_layer.size(-2), key_layer.size(-2)])
+
+
+@torch.jit.script
+# Copied from transformers.models.deberta.modeling_deberta.pos_dynamic_expand
+def pos_dynamic_expand(pos_index, p2c_att, key_layer):
+ return pos_index.expand(p2c_att.size()[:2] + (pos_index.size(-2), key_layer.size(-2)))
+
+
+class DisentangledSelfAttention(nn.Module):
+ """
+ Disentangled self-attention module
+
+ Parameters:
+ config (`DebertaV2Config`):
+            A model config class instance with the configuration to build a new model. The schema is similar to
+            *BertConfig*; for more details, please refer to [`DebertaV2Config`].
+
+ """
+
+ def __init__(self, config):
+ super().__init__()
+ if config.hidden_size % config.num_attention_heads != 0:
+ raise ValueError(
+ f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
+ f"heads ({config.num_attention_heads})"
+ )
+ self.num_attention_heads = config.num_attention_heads
+ _attention_head_size = config.hidden_size // config.num_attention_heads
+ self.attention_head_size = getattr(config, "attention_head_size", _attention_head_size)
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
+ self.query_proj = nn.Linear(config.hidden_size, self.all_head_size, bias=True)
+ self.key_proj = nn.Linear(config.hidden_size, self.all_head_size, bias=True)
+ self.value_proj = nn.Linear(config.hidden_size, self.all_head_size, bias=True)
+
+ self.share_att_key = getattr(config, "share_att_key", False)
+ self.pos_att_type = config.pos_att_type if config.pos_att_type is not None else []
+ self.relative_attention = getattr(config, "relative_attention", False)
+
+ if self.relative_attention:
+ self.position_buckets = getattr(config, "position_buckets", -1)
+ self.max_relative_positions = getattr(config, "max_relative_positions", -1)
+ if self.max_relative_positions < 1:
+ self.max_relative_positions = config.max_position_embeddings
+ self.pos_ebd_size = self.max_relative_positions
+ if self.position_buckets > 0:
+ self.pos_ebd_size = self.position_buckets
+
+ self.pos_dropout = StableDropout(config.hidden_dropout_prob)
+
+ if not self.share_att_key:
+ if "c2p" in self.pos_att_type:
+ self.pos_key_proj = nn.Linear(config.hidden_size, self.all_head_size, bias=True)
+ if "p2c" in self.pos_att_type:
+ self.pos_query_proj = nn.Linear(config.hidden_size, self.all_head_size)
+
+ self.dropout = StableDropout(config.attention_probs_dropout_prob)
+
+ def transpose_for_scores(self, x, attention_heads):
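+        # [batch, seq, all_head_size] -> [batch * heads, seq, head_size]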
+ new_x_shape = x.size()[:-1] + (attention_heads, -1)
+ x = x.view(new_x_shape)
+ return x.permute(0, 2, 1, 3).contiguous().view(-1, x.size(1), x.size(-1))
+
+ def forward(
+ self,
+ hidden_states,
+ attention_mask,
+ output_attentions=False,
+ query_states=None,
+ relative_pos=None,
+ rel_embeddings=None,
+ ):
+ """
+ Call the module
+
+ Args:
+ hidden_states (`torch.FloatTensor`):
+                Input states to the module, usually the output from the previous layer; they serve as the Q, K and V
+                in *Attention(Q,K,V)*.
+
+ attention_mask (`torch.BoolTensor`):
+                An attention mask matrix of shape [*B*, *N*, *N*], where *B* is the batch size and *N* is the maximum
+                sequence length, in which element [i,j] = *1* means the *i*-th token in the input can attend to the
+                *j*-th token.
+
+ output_attentions (`bool`, optional):
+                Whether to return the attention matrix.
+
+ query_states (`torch.FloatTensor`, optional):
+ The *Q* state in *Attention(Q,K,V)*.
+
+ relative_pos (`torch.LongTensor`):
+ The relative position encoding between the tokens in the sequence. It's of shape [*B*, *N*, *N*] with
+ values ranging in [*-max_relative_positions*, *max_relative_positions*].
+
+ rel_embeddings (`torch.FloatTensor`):
+ The embedding of relative distances. It's a tensor of shape [\\(2 \\times
+ \\text{max_relative_positions}\\), *hidden_size*].
+
+
+ """
+ if query_states is None:
+ query_states = hidden_states
+ query_layer = self.transpose_for_scores(self.query_proj(query_states), self.num_attention_heads)
+ key_layer = self.transpose_for_scores(self.key_proj(hidden_states), self.num_attention_heads)
+ value_layer = self.transpose_for_scores(self.value_proj(hidden_states), self.num_attention_heads)
+
+ rel_att = None
+ # Take the dot product between "query" and "key" to get the raw attention scores.
+ scale_factor = 1
+ if "c2p" in self.pos_att_type:
+ scale_factor += 1
+ if "p2c" in self.pos_att_type:
+ scale_factor += 1
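+        # All enabled score components (content-to-content, c2p, p2c) share one overall
+        # scaling of 1/sqrt(d * scale_factor) instead of the usual 1/sqrt(d).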
+ scale = torch.sqrt(torch.tensor(query_layer.size(-1), dtype=torch.float) * scale_factor)
+ attention_scores = torch.bmm(query_layer, key_layer.transpose(-1, -2) / scale.to(dtype=query_layer.dtype))
+ if self.relative_attention:
+ rel_embeddings = self.pos_dropout(rel_embeddings)
+ rel_att = self.disentangled_attention_bias(
+ query_layer, key_layer, relative_pos, rel_embeddings, scale_factor
+ )
+
+ if rel_att is not None:
+ attention_scores = attention_scores + rel_att
+ attention_scores = attention_scores.view(
+ -1, self.num_attention_heads, attention_scores.size(-2), attention_scores.size(-1)
+ )
+
+ # bsz x height x length x dimension
+ attention_probs = XSoftmax.apply(attention_scores, attention_mask, -1)
+ attention_probs = self.dropout(attention_probs)
+ context_layer = torch.bmm(
+ attention_probs.view(-1, attention_probs.size(-2), attention_probs.size(-1)), value_layer
+ )
+ context_layer = (
+ context_layer.view(-1, self.num_attention_heads, context_layer.size(-2), context_layer.size(-1))
+ .permute(0, 2, 1, 3)
+ .contiguous()
+ )
+ new_context_layer_shape = context_layer.size()[:-2] + (-1,)
+ context_layer = context_layer.view(new_context_layer_shape)
+ if output_attentions:
+ return (context_layer, attention_probs)
+ else:
+ return context_layer
+
+ def disentangled_attention_bias(self, query_layer, key_layer, relative_pos, rel_embeddings, scale_factor):
+ if relative_pos is None:
+ q = query_layer.size(-2)
+ relative_pos = build_relative_position(
+ q,
+ key_layer.size(-2),
+ bucket_size=self.position_buckets,
+ max_position=self.max_relative_positions,
+ device=query_layer.device,
+ )
+ if relative_pos.dim() == 2:
+ relative_pos = relative_pos.unsqueeze(0).unsqueeze(0)
+ elif relative_pos.dim() == 3:
+ relative_pos = relative_pos.unsqueeze(1)
+ # bsz x height x query x key
+ elif relative_pos.dim() != 4:
+ raise ValueError(f"Relative position ids must be of dim 2 or 3 or 4. {relative_pos.dim()}")
+
+ att_span = self.pos_ebd_size
+ relative_pos = relative_pos.long().to(query_layer.device)
+
+ rel_embeddings = rel_embeddings[0 : att_span * 2, :].unsqueeze(0)
+ if self.share_att_key:
+ pos_query_layer = self.transpose_for_scores(
+ self.query_proj(rel_embeddings), self.num_attention_heads
+ ).repeat(query_layer.size(0) // self.num_attention_heads, 1, 1)
+ pos_key_layer = self.transpose_for_scores(self.key_proj(rel_embeddings), self.num_attention_heads).repeat(
+ query_layer.size(0) // self.num_attention_heads, 1, 1
+ )
+ else:
+ if "c2p" in self.pos_att_type:
+ pos_key_layer = self.transpose_for_scores(
+ self.pos_key_proj(rel_embeddings), self.num_attention_heads
+ ).repeat(query_layer.size(0) // self.num_attention_heads, 1, 1) # .split(self.all_head_size, dim=-1)
+ if "p2c" in self.pos_att_type:
+ pos_query_layer = self.transpose_for_scores(
+ self.pos_query_proj(rel_embeddings), self.num_attention_heads
+ ).repeat(query_layer.size(0) // self.num_attention_heads, 1, 1) # .split(self.all_head_size, dim=-1)
+
+ score = 0
+ # content->position
+ if "c2p" in self.pos_att_type:
+ scale = torch.sqrt(torch.tensor(pos_key_layer.size(-1), dtype=torch.float) * scale_factor)
+ c2p_att = torch.bmm(query_layer, pos_key_layer.transpose(-1, -2))
+ c2p_pos = torch.clamp(relative_pos + att_span, 0, att_span * 2 - 1)
+ c2p_att = torch.gather(
+ c2p_att,
+ dim=-1,
+ index=c2p_pos.squeeze(0).expand([query_layer.size(0), query_layer.size(1), relative_pos.size(-1)]),
+ )
+ score += c2p_att / scale.to(dtype=c2p_att.dtype)
+
+ # position->content
+ if "p2c" in self.pos_att_type:
+ scale = torch.sqrt(torch.tensor(pos_query_layer.size(-1), dtype=torch.float) * scale_factor)
+ if key_layer.size(-2) != query_layer.size(-2):
+ r_pos = build_relative_position(
+ key_layer.size(-2),
+ key_layer.size(-2),
+ bucket_size=self.position_buckets,
+ max_position=self.max_relative_positions,
+ device=query_layer.device,
+ )
+ r_pos = r_pos.unsqueeze(0)
+ else:
+ r_pos = relative_pos
+
+ p2c_pos = torch.clamp(-r_pos + att_span, 0, att_span * 2 - 1)
+ p2c_att = torch.bmm(key_layer, pos_query_layer.transpose(-1, -2))
+ p2c_att = torch.gather(
+ p2c_att,
+ dim=-1,
+ index=p2c_pos.squeeze(0).expand([query_layer.size(0), key_layer.size(-2), key_layer.size(-2)]),
+ ).transpose(-1, -2)
+ score += p2c_att / scale.to(dtype=p2c_att.dtype)
+
+ return score
+
+
+# Copied from transformers.models.deberta.modeling_deberta.DebertaEmbeddings with DebertaLayerNorm->LayerNorm
+class DebertaV2Embeddings(nn.Module):
+ """Construct the embeddings from word, position and token_type embeddings."""
+
+ def __init__(self, config):
+ super().__init__()
+ pad_token_id = getattr(config, "pad_token_id", 0)
+ self.embedding_size = getattr(config, "embedding_size", config.hidden_size)
+ self.word_embeddings = nn.Embedding(config.vocab_size, self.embedding_size, padding_idx=pad_token_id)
+
+ self.position_biased_input = getattr(config, "position_biased_input", True)
+ if not self.position_biased_input:
+ self.position_embeddings = None
+ else:
+ self.position_embeddings = nn.Embedding(config.max_position_embeddings, self.embedding_size)
+
+ if config.type_vocab_size > 0:
+ self.token_type_embeddings = nn.Embedding(config.type_vocab_size, self.embedding_size)
+
+ if self.embedding_size != config.hidden_size:
+ self.embed_proj = nn.Linear(self.embedding_size, config.hidden_size, bias=False)
+ self.LayerNorm = LayerNorm(config.hidden_size, config.layer_norm_eps)
+ self.dropout = StableDropout(config.hidden_dropout_prob)
+ self.config = config
+
+ # position_ids (1, len position emb) is contiguous in memory and exported when serialized
+ self.register_buffer(
+ "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
+ )
+
+ def forward(self, input_ids=None, token_type_ids=None, position_ids=None, mask=None, inputs_embeds=None):
+ if input_ids is not None:
+ input_shape = input_ids.size()
+ else:
+ input_shape = inputs_embeds.size()[:-1]
+
+ seq_length = input_shape[1]
+
+ if position_ids is None:
+ position_ids = self.position_ids[:, :seq_length]
+
+ if token_type_ids is None:
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
+
+ if inputs_embeds is None:
+ inputs_embeds = self.word_embeddings(input_ids)
+
+ if self.position_embeddings is not None:
+ position_embeddings = self.position_embeddings(position_ids.long())
+ else:
+ position_embeddings = torch.zeros_like(inputs_embeds)
+
+ embeddings = inputs_embeds
+ if self.position_biased_input:
+ embeddings += position_embeddings
+ if self.config.type_vocab_size > 0:
+ token_type_embeddings = self.token_type_embeddings(token_type_ids)
+ embeddings += token_type_embeddings
+
+ if self.embedding_size != self.config.hidden_size:
+ embeddings = self.embed_proj(embeddings)
+
+ embeddings = self.LayerNorm(embeddings)
+
+ if mask is not None:
+ if mask.dim() != embeddings.dim():
+ if mask.dim() == 4:
+ mask = mask.squeeze(1).squeeze(1)
+ mask = mask.unsqueeze(2)
+ mask = mask.to(embeddings.dtype)
+
+ embeddings = embeddings * mask
+
+ embeddings = self.dropout(embeddings)
+ return embeddings
+
+
+# Copied from transformers.models.deberta.modeling_deberta.DebertaPreTrainedModel with Deberta->DebertaV2
+class DebertaV2PreTrainedModel(PreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = DebertaV2Config
+ base_model_prefix = "deberta"
+ _keys_to_ignore_on_load_unexpected = ["position_embeddings"]
+ supports_gradient_checkpointing = True
+
+ def _init_weights(self, module):
+ """Initialize the weights."""
+ if isinstance(module, nn.Linear):
+ # Slightly different from the TF version which uses truncated_normal for initialization
+ # cf https://github.com/pytorch/pytorch/pull/5617
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+ if module.bias is not None:
+ module.bias.data.zero_()
+ elif isinstance(module, nn.Embedding):
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+ if module.padding_idx is not None:
+ module.weight.data[module.padding_idx].zero_()
+
+
+DEBERTA_START_DOCSTRING = r"""
+ The DeBERTa model was proposed in [DeBERTa: Decoding-enhanced BERT with Disentangled
+    Attention](https://arxiv.org/abs/2006.03654) by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen. It's built
+    on top of BERT/RoBERTa with two improvements, i.e. disentangled attention and an enhanced mask decoder. With those
+    two improvements, it outperforms BERT/RoBERTa on a majority of tasks with 80GB of pretraining data.
+
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
+ and behavior.
+
+
+ Parameters:
+ config ([`DebertaV2Config`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+DEBERTA_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `({0})`):
+ Indices of input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
+ 1]`:
+
+ - 0 corresponds to a *sentence A* token,
+ - 1 corresponds to a *sentence B* token.
+
+ [What are token type IDs?](../glossary#token-type-ids)
+ position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
+ config.max_position_embeddings - 1]`.
+
+ [What are position IDs?](../glossary#position-ids)
+ inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+ is useful if you want more control over how to convert *input_ids* indices into associated vectors than the
+ model's internal embedding lookup matrix.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+@add_start_docstrings(
+ "The bare DeBERTa Model transformer outputting raw hidden-states without any specific head on top.",
+ DEBERTA_START_DOCSTRING,
+)
+# Copied from transformers.models.deberta.modeling_deberta.DebertaModel with Deberta->DebertaV2
+class DebertaV2Model(DebertaV2PreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+
+ self.embeddings = DebertaV2Embeddings(config)
+ self.encoder = DebertaV2Encoder(config)
+ self.z_steps = 0
+ self.config = config
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.embeddings.word_embeddings
+
+ def set_input_embeddings(self, new_embeddings):
+ self.embeddings.word_embeddings = new_embeddings
+
+ def _prune_heads(self, heads_to_prune):
+ """
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
+ class PreTrainedModel
+ """
+        raise NotImplementedError("The prune function is not implemented in the DeBERTa model.")
+
+ @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=BaseModelOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ token_type_ids: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, BaseModelOutput]:
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if input_ids is not None and inputs_embeds is not None:
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
+ elif input_ids is not None:
+ self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
+ input_shape = input_ids.size()
+ elif inputs_embeds is not None:
+ input_shape = inputs_embeds.size()[:-1]
+ else:
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
+
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
+
+ if attention_mask is None:
+ attention_mask = torch.ones(input_shape, device=device)
+ if token_type_ids is None:
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
+
+ embedding_output = self.embeddings(
+ input_ids=input_ids,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ mask=attention_mask,
+ inputs_embeds=inputs_embeds,
+ )
+
+ encoder_outputs = self.encoder(
+ embedding_output,
+ attention_mask,
+ output_hidden_states=True,
+ output_attentions=output_attentions,
+ return_dict=return_dict,
+ )
+ encoded_layers = encoder_outputs[1]
+
+ if self.z_steps > 1:
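+            # Enhanced mask decoder: re-run the last encoder layer z_steps - 1 extra
+            # times, feeding each output back in as the query states.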
+ hidden_states = encoded_layers[-2]
+ layers = [self.encoder.layer[-1] for _ in range(self.z_steps)]
+ query_states = encoded_layers[-1]
+ rel_embeddings = self.encoder.get_rel_embedding()
+ attention_mask = self.encoder.get_attention_mask(attention_mask)
+ rel_pos = self.encoder.get_rel_pos(embedding_output)
+ for layer in layers[1:]:
+ query_states = layer(
+ hidden_states,
+ attention_mask,
+ output_attentions=False,
+ query_states=query_states,
+ relative_pos=rel_pos,
+ rel_embeddings=rel_embeddings,
+ )
+ encoded_layers.append(query_states)
+
+ sequence_output = encoded_layers[-1]
+
+ if not return_dict:
+ return (sequence_output,) + encoder_outputs[(1 if output_hidden_states else 2) :]
+
+ return BaseModelOutput(
+ last_hidden_state=sequence_output,
+ hidden_states=encoder_outputs.hidden_states if output_hidden_states else None,
+ attentions=encoder_outputs.attentions,
+ )
+
+
+@add_start_docstrings("""DeBERTa Model with a `language modeling` head on top.""", DEBERTA_START_DOCSTRING)
+class DebertaV2ForMaskedLM(DebertaV2PreTrainedModel):
+ _tied_weights_keys = ["cls.predictions.decoder.weight", "cls.predictions.decoder.bias"]
+
+ def __init__(self, config):
+ super().__init__(config)
+
+ self.deberta = DebertaV2Model(config)
+ self.cls = DebertaV2OnlyMLMHead(config)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_output_embeddings(self):
+ return self.cls.predictions.decoder
+
+ def set_output_embeddings(self, new_embeddings):
+ self.cls.predictions.decoder = new_embeddings
+
+ @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=MaskedLMOutput,
+ config_class=_CONFIG_FOR_DOC,
+ mask="[MASK]",
+ )
+ # Copied from transformers.models.deberta.modeling_deberta.DebertaForMaskedLM.forward with Deberta->DebertaV2
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ token_type_ids: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ labels: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, MaskedLMOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+            Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
+            config.vocab_size]` (see the `input_ids` docstring). Tokens with indices set to `-100` are ignored
+            (masked); the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
+ """
+
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.deberta(
+ input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ sequence_output = outputs[0]
+ prediction_scores = self.cls(sequence_output)
+
+ masked_lm_loss = None
+ if labels is not None:
+ loss_fct = CrossEntropyLoss() # -100 index = padding token
+ masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
+
+ if not return_dict:
+ output = (prediction_scores,) + outputs[1:]
+ return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
+
+ return MaskedLMOutput(
+ loss=masked_lm_loss,
+ logits=prediction_scores,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+
+# Copied from transformers.models.deberta.modeling_deberta.DebertaPredictionHeadTransform with Deberta->DebertaV2
+class DebertaV2PredictionHeadTransform(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.embedding_size = getattr(config, "embedding_size", config.hidden_size)
+
+ self.dense = nn.Linear(config.hidden_size, self.embedding_size)
+ if isinstance(config.hidden_act, str):
+ self.transform_act_fn = ACT2FN[config.hidden_act]
+ else:
+ self.transform_act_fn = config.hidden_act
+ self.LayerNorm = nn.LayerNorm(self.embedding_size, eps=config.layer_norm_eps)
+
+ def forward(self, hidden_states):
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.transform_act_fn(hidden_states)
+ hidden_states = self.LayerNorm(hidden_states)
+ return hidden_states
+
+
+# Copied from transformers.models.deberta.modeling_deberta.DebertaLMPredictionHead with Deberta->DebertaV2
+class DebertaV2LMPredictionHead(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.transform = DebertaV2PredictionHeadTransform(config)
+
+ self.embedding_size = getattr(config, "embedding_size", config.hidden_size)
+ # The output weights are the same as the input embeddings, but there is
+ # an output-only bias for each token.
+ self.decoder = nn.Linear(self.embedding_size, config.vocab_size, bias=False)
+
+ self.bias = nn.Parameter(torch.zeros(config.vocab_size))
+
+ # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
+ self.decoder.bias = self.bias
+
+ def forward(self, hidden_states):
+ hidden_states = self.transform(hidden_states)
+ hidden_states = self.decoder(hidden_states)
+ return hidden_states
+
+
+# copied from transformers.models.bert.BertOnlyMLMHead with bert -> deberta
+class DebertaV2OnlyMLMHead(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.predictions = DebertaV2LMPredictionHead(config)
+
+ def forward(self, sequence_output):
+ prediction_scores = self.predictions(sequence_output)
+ return prediction_scores
+
+
+@add_start_docstrings(
+ """
+ DeBERTa Model transformer with a sequence classification/regression head on top (a linear layer on top of the
+ pooled output) e.g. for GLUE tasks.
+ """,
+ DEBERTA_START_DOCSTRING,
+)
+class DebertaV2ForSequenceClassification(DebertaV2PreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+
+ num_labels = getattr(config, "num_labels", 2)
+ self.num_labels = num_labels
+
+ self.deberta = DebertaV2Model(config)
+ self.pooler = ContextPooler(config)
+ output_dim = self.pooler.output_dim
+
+ self.classifier = nn.Linear(output_dim, num_labels)
+ drop_out = getattr(config, "cls_dropout", None)
+ drop_out = self.config.hidden_dropout_prob if drop_out is None else drop_out
+ self.dropout = StableDropout(drop_out)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.deberta.get_input_embeddings()
+
+ def set_input_embeddings(self, new_embeddings):
+ self.deberta.set_input_embeddings(new_embeddings)
+
+ @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=SequenceClassifierOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ # Copied from transformers.models.deberta.modeling_deberta.DebertaForSequenceClassification.forward with Deberta->DebertaV2
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ token_type_ids: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ labels: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, SequenceClassifierOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
+            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss); if
+            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.deberta(
+ input_ids,
+ token_type_ids=token_type_ids,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ encoder_layer = outputs[0]
+ pooled_output = self.pooler(encoder_layer)
+ pooled_output = self.dropout(pooled_output)
+ logits = self.classifier(pooled_output)
+
+ loss = None
+ if labels is not None:
+ if self.config.problem_type is None:
+ if self.num_labels == 1:
+ # regression task
+ loss_fn = nn.MSELoss()
+ logits = logits.view(-1).to(labels.dtype)
+ loss = loss_fn(logits, labels.view(-1))
+ elif labels.dim() == 1 or labels.size(-1) == 1:
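+                    # Entries with label < 0 are treated as unlabeled and excluded from
+                    # the cross-entropy; if none remain, the loss is zero.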
+ label_index = (labels >= 0).nonzero()
+ labels = labels.long()
+ if label_index.size(0) > 0:
+ labeled_logits = torch.gather(
+ logits, 0, label_index.expand(label_index.size(0), logits.size(1))
+ )
+ labels = torch.gather(labels, 0, label_index.view(-1))
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(labeled_logits.view(-1, self.num_labels).float(), labels.view(-1))
+ else:
+ loss = torch.tensor(0).to(logits)
+ else:
+ log_softmax = nn.LogSoftmax(-1)
+ loss = -((log_softmax(logits) * labels).sum(-1)).mean()
+ elif self.config.problem_type == "regression":
+ loss_fct = MSELoss()
+ if self.num_labels == 1:
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
+ else:
+ loss = loss_fct(logits, labels)
+ elif self.config.problem_type == "single_label_classification":
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
+ elif self.config.problem_type == "multi_label_classification":
+ loss_fct = BCEWithLogitsLoss()
+ loss = loss_fct(logits, labels)
+ if not return_dict:
+ output = (logits,) + outputs[1:]
+ return ((loss,) + output) if loss is not None else output
+
+ return SequenceClassifierOutput(
+ loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions
+ )
+
+
+@add_start_docstrings(
+ """
+ DeBERTa Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
+ Named-Entity-Recognition (NER) tasks.
+ """,
+ DEBERTA_START_DOCSTRING,
+)
+# Copied from transformers.models.deberta.modeling_deberta.DebertaForTokenClassification with Deberta->DebertaV2
+class DebertaV2ForTokenClassification(DebertaV2PreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+ self.num_labels = config.num_labels
+
+ self.deberta = DebertaV2Model(config)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=TokenClassifierOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ token_type_ids: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ labels: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, TokenClassifierOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.deberta(
+ input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ sequence_output = outputs[0]
+
+ sequence_output = self.dropout(sequence_output)
+ logits = self.classifier(sequence_output)
+
+ loss = None
+ if labels is not None:
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
+
+ if not return_dict:
+ output = (logits,) + outputs[1:]
+ return ((loss,) + output) if loss is not None else output
+
+ return TokenClassifierOutput(
+ loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions
+ )
+
+
+@add_start_docstrings(
+ """
+ DeBERTa Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
+ layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
+ """,
+ DEBERTA_START_DOCSTRING,
+)
+class DebertaV2ForQuestionAnswering(DebertaV2PreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+ self.num_labels = config.num_labels
+
+ self.deberta = DebertaV2Model(config)
+ self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=QuestionAnsweringModelOutput,
+ config_class=_CONFIG_FOR_DOC,
+ qa_target_start_index=_QA_TARGET_START_INDEX,
+ qa_target_end_index=_QA_TARGET_END_INDEX,
+ )
+ # Copied from transformers.models.deberta.modeling_deberta.DebertaForQuestionAnswering.forward with Deberta->DebertaV2
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ token_type_ids: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ start_positions: Optional[torch.Tensor] = None,
+ end_positions: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, QuestionAnsweringModelOutput]:
+ r"""
+ start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
+            Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
+            are not taken into account for computing the loss.
+ end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
+            Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
+            are not taken into account for computing the loss.
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.deberta(
+ input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ sequence_output = outputs[0]
+
+ logits = self.qa_outputs(sequence_output)
+ start_logits, end_logits = logits.split(1, dim=-1)
+ start_logits = start_logits.squeeze(-1).contiguous()
+ end_logits = end_logits.squeeze(-1).contiguous()
+
+ total_loss = None
+ if start_positions is not None and end_positions is not None:
+            # If we are on multi-GPU, splitting adds a dimension
+ if len(start_positions.size()) > 1:
+ start_positions = start_positions.squeeze(-1)
+ if len(end_positions.size()) > 1:
+ end_positions = end_positions.squeeze(-1)
+ # sometimes the start/end positions are outside our model inputs, we ignore these terms
+ ignored_index = start_logits.size(1)
+ start_positions = start_positions.clamp(0, ignored_index)
+ end_positions = end_positions.clamp(0, ignored_index)
+
+ loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
+ start_loss = loss_fct(start_logits, start_positions)
+ end_loss = loss_fct(end_logits, end_positions)
+ total_loss = (start_loss + end_loss) / 2
+
+ if not return_dict:
+ output = (start_logits, end_logits) + outputs[1:]
+ return ((total_loss,) + output) if total_loss is not None else output
+
+ return QuestionAnsweringModelOutput(
+ loss=total_loss,
+ start_logits=start_logits,
+ end_logits=end_logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+ DeBERTa Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
+ softmax) e.g. for RocStories/SWAG tasks.
+ """,
+ DEBERTA_START_DOCSTRING,
+)
+class DebertaV2ForMultipleChoice(DebertaV2PreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+
+ num_labels = getattr(config, "num_labels", 2)
+ self.num_labels = num_labels
+
+ self.deberta = DebertaV2Model(config)
+ self.pooler = ContextPooler(config)
+ output_dim = self.pooler.output_dim
+
+ self.classifier = nn.Linear(output_dim, 1)
+ drop_out = getattr(config, "cls_dropout", None)
+ drop_out = self.config.hidden_dropout_prob if drop_out is None else drop_out
+ self.dropout = StableDropout(drop_out)
+
+ self.init_weights()
+
+ def get_input_embeddings(self):
+ return self.deberta.get_input_embeddings()
+
+ def set_input_embeddings(self, new_embeddings):
+ self.deberta.set_input_embeddings(new_embeddings)
+
+ @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=MultipleChoiceModelOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ token_type_ids: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ labels: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, MultipleChoiceModelOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
+ num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
+ `input_ids` above)
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+ num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
+
+ flat_input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
+ flat_position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
+ flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
+ flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
+ flat_inputs_embeds = (
+ inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
+ if inputs_embeds is not None
+ else None
+ )
+
+ outputs = self.deberta(
+ flat_input_ids,
+ position_ids=flat_position_ids,
+ token_type_ids=flat_token_type_ids,
+ attention_mask=flat_attention_mask,
+ inputs_embeds=flat_inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ encoder_layer = outputs[0]
+ pooled_output = self.pooler(encoder_layer)
+ pooled_output = self.dropout(pooled_output)
+ logits = self.classifier(pooled_output)
+ reshaped_logits = logits.view(-1, num_choices)
+
+ loss = None
+ if labels is not None:
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(reshaped_logits, labels)
+
+ if not return_dict:
+ output = (reshaped_logits,) + outputs[1:]
+ return ((loss,) + output) if loss is not None else output
+
+ return MultipleChoiceModelOutput(
+ loss=loss,
+ logits=reshaped_logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
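+
+ # Usage sketch (illustrative only): for multiple choice, inputs are expected
+ # with shape (batch_size, num_choices, seq_len); the forward pass flattens
+ # them to (batch_size * num_choices, seq_len), scores each flattened choice
+ # with a single linear head, and reshapes the logits back to
+ # (batch_size, num_choices) before the cross-entropy over choices.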
diff --git a/venv/lib/python3.10/site-packages/transformers/models/deberta_v2/modeling_tf_deberta_v2.py b/venv/lib/python3.10/site-packages/transformers/models/deberta_v2/modeling_tf_deberta_v2.py
new file mode 100644
index 0000000000000000000000000000000000000000..546e7f1a8d003857ad31d70728b4fc0bc1d99744
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/deberta_v2/modeling_tf_deberta_v2.py
@@ -0,0 +1,1874 @@
+# coding=utf-8
+# Copyright 2021 Microsoft and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" TF 2.0 DeBERTa-v2 model."""
+
+from __future__ import annotations
+
+from typing import Dict, Optional, Tuple, Union
+
+import numpy as np
+import tensorflow as tf
+
+from ...activations_tf import get_tf_activation
+from ...modeling_tf_outputs import (
+ TFBaseModelOutput,
+ TFMaskedLMOutput,
+ TFMultipleChoiceModelOutput,
+ TFQuestionAnsweringModelOutput,
+ TFSequenceClassifierOutput,
+ TFTokenClassifierOutput,
+)
+from ...modeling_tf_utils import (
+ TFMaskedLanguageModelingLoss,
+ TFModelInputType,
+ TFMultipleChoiceLoss,
+ TFPreTrainedModel,
+ TFQuestionAnsweringLoss,
+ TFSequenceClassificationLoss,
+ TFTokenClassificationLoss,
+ get_initializer,
+ keras,
+ unpack_inputs,
+)
+from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax
+from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
+from .configuration_deberta_v2 import DebertaV2Config
+
+
+logger = logging.get_logger(__name__)
+
+_CONFIG_FOR_DOC = "DebertaV2Config"
+_CHECKPOINT_FOR_DOC = "kamalkraj/deberta-v2-xlarge"
+
+
+from ..deprecated._archive_maps import TF_DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
+
+
+# Copied from transformers.models.deberta.modeling_tf_deberta.TFDebertaContextPooler with Deberta->DebertaV2
+class TFDebertaV2ContextPooler(keras.layers.Layer):
+ def __init__(self, config: DebertaV2Config, **kwargs):
+ super().__init__(**kwargs)
+ self.dense = keras.layers.Dense(config.pooler_hidden_size, name="dense")
+ self.dropout = TFDebertaV2StableDropout(config.pooler_dropout, name="dropout")
+ self.config = config
+
+ def call(self, hidden_states, training: bool = False):
+ # We "pool" the model by simply taking the hidden state corresponding
+ # to the first token.
+ context_token = hidden_states[:, 0]
+ context_token = self.dropout(context_token, training=training)
+ pooled_output = self.dense(context_token)
+ pooled_output = get_tf_activation(self.config.pooler_hidden_act)(pooled_output)
+ return pooled_output
+
+ @property
+ def output_dim(self) -> int:
+ return self.config.hidden_size
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "dense", None) is not None:
+ with tf.name_scope(self.dense.name):
+ self.dense.build([None, None, self.config.pooler_hidden_size])
+ if getattr(self, "dropout", None) is not None:
+ with tf.name_scope(self.dropout.name):
+ self.dropout.build(None)
+
+
+# Copied from transformers.models.deberta.modeling_tf_deberta.TFDebertaXSoftmax with Deberta->DebertaV2
+class TFDebertaV2XSoftmax(keras.layers.Layer):
+ """
+ Masked softmax, optimized to save memory
+
+ Args:
+ inputs (`tf.Tensor`): The input tensor to which softmax will be applied.
+ mask (`tf.Tensor`): The mask matrix; a 0 entry means that element is ignored in the softmax calculation.
+ axis (`int`): The dimension along which softmax is applied
+ """
+
+ def __init__(self, axis=-1, **kwargs):
+ super().__init__(**kwargs)
+ self.axis = axis
+
+ def call(self, inputs: tf.Tensor, mask: tf.Tensor):
+ rmask = tf.logical_not(tf.cast(mask, tf.bool))
+ output = tf.where(rmask, float("-inf"), inputs)
+ output = stable_softmax(output, self.axis)
+ output = tf.where(rmask, 0.0, output)
+ return output
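+
+ # Example of the masking behavior (illustrative, not upstream code):
+ # inputs = [[1.0, 2.0, 3.0]], mask = [[1, 1, 0]]
+ # -> the last logit is set to -inf before the softmax and its probability is
+ # forced back to exactly 0.0 afterwards, giving roughly [[0.27, 0.73, 0.0]],
+ # so padded keys never receive attention weight.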
+
+
+# Copied from transformers.models.deberta.modeling_tf_deberta.TFDebertaStableDropout with Deberta->DebertaV2
+class TFDebertaV2StableDropout(keras.layers.Layer):
+ """
+ Optimized dropout module for stabilizing the training
+
+ Args:
+ drop_prob (float): the dropout probability
+ """
+
+ def __init__(self, drop_prob, **kwargs):
+ super().__init__(**kwargs)
+ self.drop_prob = drop_prob
+
+ @tf.custom_gradient
+ def xdropout(self, inputs):
+ """
+ Applies dropout to the inputs, like vanilla dropout, but also scales the surviving elements up by 1/(1 - drop_prob).
+ """
+ mask = tf.cast(
+ 1
+ - tf.compat.v1.distributions.Bernoulli(probs=1.0 - self.drop_prob).sample(sample_shape=shape_list(inputs)),
+ tf.bool,
+ )
+ scale = tf.convert_to_tensor(1.0 / (1 - self.drop_prob), dtype=tf.float32)
+ if self.drop_prob > 0:
+ inputs = tf.where(mask, 0.0, inputs) * scale
+
+ def grad(upstream):
+ if self.drop_prob > 0:
+ return tf.where(mask, 0.0, upstream) * scale
+ else:
+ return upstream
+
+ return inputs, grad
+
+ def call(self, inputs: tf.Tensor, training: bool = False):
+ if training:
+ return self.xdropout(inputs)
+ return inputs
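+
+ # Numeric sketch (illustrative): with drop_prob = 0.1, surviving elements are
+ # scaled by 1 / (1 - 0.1) ~ 1.11 in both the forward pass and the custom
+ # gradient, i.e. standard inverted dropout, so no rescaling is needed at
+ # inference time.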
+
+
+# Copied from transformers.models.deberta.modeling_tf_deberta.TFDebertaSelfOutput with Deberta->DebertaV2
+class TFDebertaV2SelfOutput(keras.layers.Layer):
+ def __init__(self, config: DebertaV2Config, **kwargs):
+ super().__init__(**kwargs)
+ self.dense = keras.layers.Dense(config.hidden_size, name="dense")
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
+ self.dropout = TFDebertaV2StableDropout(config.hidden_dropout_prob, name="dropout")
+ self.config = config
+
+ def call(self, hidden_states, input_tensor, training: bool = False):
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.dropout(hidden_states, training=training)
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
+ return hidden_states
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "dense", None) is not None:
+ with tf.name_scope(self.dense.name):
+ self.dense.build([None, None, self.config.hidden_size])
+ if getattr(self, "LayerNorm", None) is not None:
+ with tf.name_scope(self.LayerNorm.name):
+ self.LayerNorm.build([None, None, self.config.hidden_size])
+ if getattr(self, "dropout", None) is not None:
+ with tf.name_scope(self.dropout.name):
+ self.dropout.build(None)
+
+
+# Copied from transformers.models.deberta.modeling_tf_deberta.TFDebertaAttention with Deberta->DebertaV2
+class TFDebertaV2Attention(keras.layers.Layer):
+ def __init__(self, config: DebertaV2Config, **kwargs):
+ super().__init__(**kwargs)
+ self.self = TFDebertaV2DisentangledSelfAttention(config, name="self")
+ self.dense_output = TFDebertaV2SelfOutput(config, name="output")
+ self.config = config
+
+ def call(
+ self,
+ input_tensor: tf.Tensor,
+ attention_mask: tf.Tensor,
+ query_states: tf.Tensor = None,
+ relative_pos: tf.Tensor = None,
+ rel_embeddings: tf.Tensor = None,
+ output_attentions: bool = False,
+ training: bool = False,
+ ) -> Tuple[tf.Tensor]:
+ self_outputs = self.self(
+ hidden_states=input_tensor,
+ attention_mask=attention_mask,
+ query_states=query_states,
+ relative_pos=relative_pos,
+ rel_embeddings=rel_embeddings,
+ output_attentions=output_attentions,
+ training=training,
+ )
+ if query_states is None:
+ query_states = input_tensor
+ attention_output = self.dense_output(
+ hidden_states=self_outputs[0], input_tensor=query_states, training=training
+ )
+
+ output = (attention_output,) + self_outputs[1:]
+
+ return output
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "self", None) is not None:
+ with tf.name_scope(self.self.name):
+ self.self.build(None)
+ if getattr(self, "dense_output", None) is not None:
+ with tf.name_scope(self.dense_output.name):
+ self.dense_output.build(None)
+
+
+# Copied from transformers.models.deberta.modeling_tf_deberta.TFDebertaIntermediate with Deberta->DebertaV2
+class TFDebertaV2Intermediate(keras.layers.Layer):
+ def __init__(self, config: DebertaV2Config, **kwargs):
+ super().__init__(**kwargs)
+
+ self.dense = keras.layers.Dense(
+ units=config.intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
+ )
+
+ if isinstance(config.hidden_act, str):
+ self.intermediate_act_fn = get_tf_activation(config.hidden_act)
+ else:
+ self.intermediate_act_fn = config.hidden_act
+ self.config = config
+
+ def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
+ hidden_states = self.dense(inputs=hidden_states)
+ hidden_states = self.intermediate_act_fn(hidden_states)
+
+ return hidden_states
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "dense", None) is not None:
+ with tf.name_scope(self.dense.name):
+ self.dense.build([None, None, self.config.hidden_size])
+
+
+# Copied from transformers.models.deberta.modeling_tf_deberta.TFDebertaOutput with Deberta->DebertaV2
+class TFDebertaV2Output(keras.layers.Layer):
+ def __init__(self, config: DebertaV2Config, **kwargs):
+ super().__init__(**kwargs)
+
+ self.dense = keras.layers.Dense(
+ units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
+ )
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
+ self.dropout = TFDebertaV2StableDropout(config.hidden_dropout_prob, name="dropout")
+ self.config = config
+
+ def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool = False) -> tf.Tensor:
+ hidden_states = self.dense(inputs=hidden_states)
+ hidden_states = self.dropout(hidden_states, training=training)
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
+
+ return hidden_states
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "dense", None) is not None:
+ with tf.name_scope(self.dense.name):
+ self.dense.build([None, None, self.config.intermediate_size])
+ if getattr(self, "LayerNorm", None) is not None:
+ with tf.name_scope(self.LayerNorm.name):
+ self.LayerNorm.build([None, None, self.config.hidden_size])
+ if getattr(self, "dropout", None) is not None:
+ with tf.name_scope(self.dropout.name):
+ self.dropout.build(None)
+
+
+# Copied from transformers.models.deberta.modeling_tf_deberta.TFDebertaLayer with Deberta->DebertaV2
+class TFDebertaV2Layer(keras.layers.Layer):
+ def __init__(self, config: DebertaV2Config, **kwargs):
+ super().__init__(**kwargs)
+
+ self.attention = TFDebertaV2Attention(config, name="attention")
+ self.intermediate = TFDebertaV2Intermediate(config, name="intermediate")
+ self.bert_output = TFDebertaV2Output(config, name="output")
+
+ def call(
+ self,
+ hidden_states: tf.Tensor,
+ attention_mask: tf.Tensor,
+ query_states: tf.Tensor = None,
+ relative_pos: tf.Tensor = None,
+ rel_embeddings: tf.Tensor = None,
+ output_attentions: bool = False,
+ training: bool = False,
+ ) -> Tuple[tf.Tensor]:
+ attention_outputs = self.attention(
+ input_tensor=hidden_states,
+ attention_mask=attention_mask,
+ query_states=query_states,
+ relative_pos=relative_pos,
+ rel_embeddings=rel_embeddings,
+ output_attentions=output_attentions,
+ training=training,
+ )
+ attention_output = attention_outputs[0]
+ intermediate_output = self.intermediate(hidden_states=attention_output)
+ layer_output = self.bert_output(
+ hidden_states=intermediate_output, input_tensor=attention_output, training=training
+ )
+ outputs = (layer_output,) + attention_outputs[1:] # add attentions if we output them
+
+ return outputs
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "attention", None) is not None:
+ with tf.name_scope(self.attention.name):
+ self.attention.build(None)
+ if getattr(self, "intermediate", None) is not None:
+ with tf.name_scope(self.intermediate.name):
+ self.intermediate.build(None)
+ if getattr(self, "bert_output", None) is not None:
+ with tf.name_scope(self.bert_output.name):
+ self.bert_output.build(None)
+
+
+class TFDebertaV2ConvLayer(keras.layers.Layer):
+ def __init__(self, config: DebertaV2Config, **kwargs):
+ super().__init__(**kwargs)
+
+ self.kernel_size = getattr(config, "conv_kernel_size", 3)
+ # groups = getattr(config, "conv_groups", 1)
+ self.conv_act = get_tf_activation(getattr(config, "conv_act", "tanh"))
+ self.padding = (self.kernel_size - 1) // 2
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
+ self.dropout = TFDebertaV2StableDropout(config.hidden_dropout_prob, name="dropout")
+ self.config = config
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ with tf.name_scope("conv"):
+ self.conv_kernel = self.add_weight(
+ name="kernel",
+ shape=[self.kernel_size, self.config.hidden_size, self.config.hidden_size],
+ initializer=get_initializer(self.config.initializer_range),
+ )
+ self.conv_bias = self.add_weight(
+ name="bias", shape=[self.config.hidden_size], initializer=tf.zeros_initializer()
+ )
+ if getattr(self, "LayerNorm", None) is not None:
+ with tf.name_scope(self.LayerNorm.name):
+ self.LayerNorm.build([None, None, self.config.hidden_size])
+ if getattr(self, "dropout", None) is not None:
+ with tf.name_scope(self.dropout.name):
+ self.dropout.build(None)
+
+ def call(
+ self, hidden_states: tf.Tensor, residual_states: tf.Tensor, input_mask: tf.Tensor, training: bool = False
+ ) -> tf.Tensor:
+ out = tf.nn.conv2d(
+ tf.expand_dims(hidden_states, 1),
+ tf.expand_dims(self.conv_kernel, 0),
+ strides=1,
+ padding=[[0, 0], [0, 0], [self.padding, self.padding], [0, 0]],
+ )
+ out = tf.squeeze(tf.nn.bias_add(out, self.conv_bias), 1)
+ rmask = tf.cast(1 - input_mask, tf.bool)
+ out = tf.where(tf.broadcast_to(tf.expand_dims(rmask, -1), shape_list(out)), 0.0, out)
+ out = self.dropout(out, training=training)
+ out = self.conv_act(out)
+
+ layer_norm_input = residual_states + out
+ output = self.LayerNorm(layer_norm_input)
+
+ if input_mask is None:
+ output_states = output
+ else:
+ if len(shape_list(input_mask)) != len(shape_list(layer_norm_input)):
+ if len(shape_list(input_mask)) == 4:
+ input_mask = tf.squeeze(tf.squeeze(input_mask, axis=1), axis=1)
+ input_mask = tf.cast(tf.expand_dims(input_mask, axis=2), tf.float32)
+
+ output_states = output * input_mask
+
+ return output_states
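+
+ # Shape sketch (illustrative): the convolution above treats the sequence as a
+ # 1-D signal: hidden_states (B, N, H) is expanded to (B, 1, N, H), convolved
+ # with a (1, kernel_size, H, H) kernel under 'same' padding, and squeezed
+ # back to (B, N, H) before the residual LayerNorm. Note that dropout is
+ # applied before the activation here.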
+
+
+class TFDebertaV2Encoder(keras.layers.Layer):
+ def __init__(self, config: DebertaV2Config, **kwargs):
+ super().__init__(**kwargs)
+
+ self.layer = [TFDebertaV2Layer(config, name=f"layer_._{i}") for i in range(config.num_hidden_layers)]
+ self.relative_attention = getattr(config, "relative_attention", False)
+ self.config = config
+ if self.relative_attention:
+ self.max_relative_positions = getattr(config, "max_relative_positions", -1)
+ if self.max_relative_positions < 1:
+ self.max_relative_positions = config.max_position_embeddings
+
+ self.position_buckets = getattr(config, "position_buckets", -1)
+ self.pos_ebd_size = self.max_relative_positions * 2
+
+ if self.position_buckets > 0:
+ self.pos_ebd_size = self.position_buckets * 2
+
+ self.norm_rel_ebd = [x.strip() for x in getattr(config, "norm_rel_ebd", "none").lower().split("|")]
+
+ if "layer_norm" in self.norm_rel_ebd:
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
+
+ self.conv = TFDebertaV2ConvLayer(config, name="conv") if getattr(config, "conv_kernel_size", 0) > 0 else None
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if self.relative_attention:
+ self.rel_embeddings = self.add_weight(
+ name="rel_embeddings.weight",
+ shape=[self.pos_ebd_size, self.config.hidden_size],
+ initializer=get_initializer(self.config.initializer_range),
+ )
+ if getattr(self, "conv", None) is not None:
+ with tf.name_scope(self.conv.name):
+ self.conv.build(None)
+ if getattr(self, "LayerNorm", None) is not None:
+ with tf.name_scope(self.LayerNorm.name):
+ self.LayerNorm.build([None, self.config.hidden_size])
+ if getattr(self, "layer", None) is not None:
+ for layer in self.layer:
+ with tf.name_scope(layer.name):
+ layer.build(None)
+
+ def get_rel_embedding(self):
+ rel_embeddings = self.rel_embeddings if self.relative_attention else None
+ if rel_embeddings is not None and ("layer_norm" in self.norm_rel_ebd):
+ rel_embeddings = self.LayerNorm(rel_embeddings)
+ return rel_embeddings
+
+ def get_attention_mask(self, attention_mask):
+ if len(shape_list(attention_mask)) <= 2:
+ extended_attention_mask = tf.expand_dims(tf.expand_dims(attention_mask, 1), 2)
+ attention_mask = extended_attention_mask * tf.expand_dims(tf.squeeze(extended_attention_mask, -2), -1)
+ attention_mask = tf.cast(attention_mask, tf.uint8)
+ elif len(shape_list(attention_mask)) == 3:
+ attention_mask = tf.expand_dims(attention_mask, 1)
+
+ return attention_mask
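+
+ # Shape sketch (illustrative): a 2-D padding mask (B, N) becomes a 4-D
+ # (B, 1, N, N) mask where entry [b, 0, i, j] = mask[b, i] * mask[b, j], i.e.
+ # token i may attend to token j only when both are real (non-padding) tokens.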
+
+ def get_rel_pos(self, hidden_states, query_states=None, relative_pos=None):
+ if self.relative_attention and relative_pos is None:
+ q = shape_list(query_states)[-2] if query_states is not None else shape_list(hidden_states)[-2]
+ relative_pos = build_relative_position(
+ q,
+ shape_list(hidden_states)[-2],
+ bucket_size=self.position_buckets,
+ max_position=self.max_relative_positions,
+ )
+ return relative_pos
+
+ def call(
+ self,
+ hidden_states: tf.Tensor,
+ attention_mask: tf.Tensor,
+ query_states: tf.Tensor = None,
+ relative_pos: tf.Tensor = None,
+ output_attentions: bool = False,
+ output_hidden_states: bool = False,
+ return_dict: bool = True,
+ training: bool = False,
+ ) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]:
+ if len(shape_list(attention_mask)) <= 2:
+ input_mask = attention_mask
+ else:
+ input_mask = tf.cast(tf.math.reduce_sum(attention_mask, axis=-2) > 0, dtype=tf.uint8)
+
+ all_hidden_states = () if output_hidden_states else None
+ all_attentions = () if output_attentions else None
+
+ attention_mask = self.get_attention_mask(attention_mask)
+ relative_pos = self.get_rel_pos(hidden_states, query_states, relative_pos)
+
+ next_kv = hidden_states
+
+ rel_embeddings = self.get_rel_embedding()
+ output_states = next_kv
+ for i, layer_module in enumerate(self.layer):
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (output_states,)
+
+ layer_outputs = layer_module(
+ hidden_states=next_kv,
+ attention_mask=attention_mask,
+ query_states=query_states,
+ relative_pos=relative_pos,
+ rel_embeddings=rel_embeddings,
+ output_attentions=output_attentions,
+ training=training,
+ )
+ output_states = layer_outputs[0]
+
+ if i == 0 and self.conv is not None:
+ output_states = self.conv(hidden_states, output_states, input_mask)
+
+ next_kv = output_states
+
+ if output_attentions:
+ all_attentions = all_attentions + (layer_outputs[1],)
+
+ # Add last layer
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (output_states,)
+
+ if not return_dict:
+ return tuple(v for v in [output_states, all_hidden_states, all_attentions] if v is not None)
+
+ return TFBaseModelOutput(
+ last_hidden_state=output_states, hidden_states=all_hidden_states, attentions=all_attentions
+ )
+
+
+def make_log_bucket_position(relative_pos, bucket_size, max_position):
+ sign = tf.math.sign(relative_pos)
+ mid = bucket_size // 2
+ abs_pos = tf.where((relative_pos < mid) & (relative_pos > -mid), mid - 1, tf.math.abs(relative_pos))
+ log_pos = (
+ tf.math.ceil(
+ tf.cast(tf.math.log(abs_pos / mid), tf.float32) / tf.math.log((max_position - 1) / mid) * (mid - 1)
+ )
+ + mid
+ )
+ bucket_pos = tf.cast(
+ tf.where(abs_pos <= mid, tf.cast(relative_pos, tf.float32), log_pos * tf.cast(sign, tf.float32)), tf.int32
+ )
+ return bucket_pos
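+
+ # Worked example (illustrative): with bucket_size = 256 and max_position = 512,
+ # mid = 128; relative distances with |rel| <= 128 keep their exact value,
+ # while larger distances are compressed logarithmically into buckets
+ # 129..255, so the position embedding table needs only 2 * bucket_size rows.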
+
+
+def build_relative_position(query_size, key_size, bucket_size=-1, max_position=-1):
+ """
+ Build relative position according to the query and key
+
+ We assume the absolute position of query \\(P_q\\) ranges from (0, query_size) and the absolute position of key
+ \\(P_k\\) ranges from (0, key_size). The relative positions from query to key are \\(R_{q \\rightarrow k} = P_q -
+ P_k\\)
+
+ Args:
+ query_size (int): the length of query
+ key_size (int): the length of key
+ bucket_size (int): the size of position bucket
+ max_position (int): the maximum allowed absolute position
+
+ Return:
+ `tf.Tensor`: A tensor with shape [1, query_size, key_size]
+
+ """
+ q_ids = tf.range(query_size, dtype=tf.int32)
+ k_ids = tf.range(key_size, dtype=tf.int32)
+ rel_pos_ids = q_ids[:, None] - tf.tile(tf.expand_dims(k_ids, axis=0), [shape_list(q_ids)[0], 1])
+ if bucket_size > 0 and max_position > 0:
+ rel_pos_ids = make_log_bucket_position(rel_pos_ids, bucket_size, max_position)
+ rel_pos_ids = rel_pos_ids[:query_size, :]
+ rel_pos_ids = tf.expand_dims(rel_pos_ids, axis=0)
+ return tf.cast(rel_pos_ids, tf.int64)
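+
+ # Example (illustrative): build_relative_position(3, 3) yields
+ # [[[ 0, -1, -2],
+ #   [ 1,  0, -1],
+ #   [ 2,  1,  0]]]
+ # i.e. entry [q, k] is q - k, the signed distance from query q to key k.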
+
+
+def c2p_dynamic_expand(c2p_pos, query_layer, relative_pos):
+ shapes = [
+ shape_list(query_layer)[0],
+ shape_list(query_layer)[1],
+ shape_list(query_layer)[2],
+ shape_list(relative_pos)[-1],
+ ]
+ return tf.broadcast_to(c2p_pos, shapes)
+
+
+def p2c_dynamic_expand(c2p_pos, query_layer, key_layer):
+ shapes = [
+ shape_list(query_layer)[0],
+ shape_list(query_layer)[1],
+ shape_list(key_layer)[-2],
+ shape_list(key_layer)[-2],
+ ]
+ return tf.broadcast_to(c2p_pos, shapes)
+
+
+def pos_dynamic_expand(pos_index, p2c_att, key_layer):
+ shapes = shape_list(p2c_att)[:2] + [shape_list(pos_index)[-2], shape_list(key_layer)[-2]]
+ return tf.broadcast_to(pos_index, shapes)
+
+
+def take_along_axis(x, indices):
+ # Only a valid port of np.take_along_axis when the gather axis is -1
+
+ # TPU + gathers and reshapes don't go along well -- see https://github.com/huggingface/transformers/issues/18239
+ if isinstance(tf.distribute.get_strategy(), tf.distribute.TPUStrategy):
+ # [B, S, P] -> [B, S, P, D]
+ one_hot_indices = tf.one_hot(indices, depth=x.shape[-1], dtype=x.dtype)
+
+ # if we ignore the first two dims, this is equivalent to multiplying a matrix (one hot) by a vector (x)
+ # grossly abusing notation: [B, S, P, D] . [B, S, D] = [B, S, P]
+ gathered = tf.einsum("ijkl,ijl->ijk", one_hot_indices, x)
+
+ # GPUs, on the other hand, prefer gathers instead of large one-hot+matmuls
+ else:
+ gathered = tf.gather(x, indices, batch_dims=2)
+
+ return gathered
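+
+ # Example (illustrative): for x of shape (B, S, D) and indices of shape
+ # (B, S, P), take_along_axis returns a (B, S, P) tensor where
+ # out[b, s, p] = x[b, s, indices[b, s, p]] -- the same contract as
+ # np.take_along_axis(x, indices, axis=-1), just with a TPU-friendly
+ # one-hot matmul fallback.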
+
+
+class TFDebertaV2DisentangledSelfAttention(keras.layers.Layer):
+ """
+ Disentangled self-attention module
+
+ Parameters:
+ config (`DebertaV2Config`):
+ A model config class instance with the configuration to build a new model. The schema is similar to
+ *BertConfig*; for more details, please refer to [`DebertaV2Config`]
+
+ """
+
+ def __init__(self, config: DebertaV2Config, **kwargs):
+ super().__init__(**kwargs)
+ if config.hidden_size % config.num_attention_heads != 0:
+ raise ValueError(
+ f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
+ f"heads ({config.num_attention_heads})"
+ )
+ self.num_attention_heads = config.num_attention_heads
+ _attention_head_size = config.hidden_size // config.num_attention_heads
+ self.attention_head_size = getattr(config, "attention_head_size", _attention_head_size)
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
+ self.query_proj = keras.layers.Dense(
+ self.all_head_size,
+ kernel_initializer=get_initializer(config.initializer_range),
+ name="query_proj",
+ use_bias=True,
+ )
+ self.key_proj = keras.layers.Dense(
+ self.all_head_size,
+ kernel_initializer=get_initializer(config.initializer_range),
+ name="key_proj",
+ use_bias=True,
+ )
+ self.value_proj = keras.layers.Dense(
+ self.all_head_size,
+ kernel_initializer=get_initializer(config.initializer_range),
+ name="value_proj",
+ use_bias=True,
+ )
+
+ self.share_att_key = getattr(config, "share_att_key", False)
+ self.pos_att_type = config.pos_att_type if config.pos_att_type is not None else []
+ self.relative_attention = getattr(config, "relative_attention", False)
+
+ if self.relative_attention:
+ self.position_buckets = getattr(config, "position_buckets", -1)
+ self.max_relative_positions = getattr(config, "max_relative_positions", -1)
+ if self.max_relative_positions < 1:
+ self.max_relative_positions = config.max_position_embeddings
+ self.pos_ebd_size = self.max_relative_positions
+ if self.position_buckets > 0:
+ self.pos_ebd_size = self.position_buckets
+
+ self.pos_dropout = TFDebertaV2StableDropout(config.hidden_dropout_prob, name="pos_dropout")
+
+ if not self.share_att_key:
+ if "c2p" in self.pos_att_type:
+ self.pos_key_proj = keras.layers.Dense(
+ self.all_head_size,
+ kernel_initializer=get_initializer(config.initializer_range),
+ name="pos_proj",
+ use_bias=True,
+ )
+ if "p2c" in self.pos_att_type:
+ self.pos_query_proj = keras.layers.Dense(
+ self.all_head_size,
+ kernel_initializer=get_initializer(config.initializer_range),
+ name="pos_q_proj",
+ )
+ self.softmax = TFDebertaV2XSoftmax(axis=-1)
+ self.dropout = TFDebertaV2StableDropout(config.attention_probs_dropout_prob, name="dropout")
+ self.config = config
+
+ def transpose_for_scores(self, tensor: tf.Tensor, attention_heads: int) -> tf.Tensor:
+ tensor_shape = shape_list(tensor)
+ # In graph mode, we can't reshape with -1 as the final dimension if the first dimension (batch size) is None
+ shape = tensor_shape[:-1] + [attention_heads, tensor_shape[-1] // attention_heads]
+ # Reshape from [batch_size, seq_length, all_head_size] to [batch_size, seq_length, num_attention_heads, attention_head_size]
+ tensor = tf.reshape(tensor=tensor, shape=shape)
+ tensor = tf.transpose(tensor, perm=[0, 2, 1, 3])
+ x_shape = shape_list(tensor)
+ tensor = tf.reshape(tensor, shape=[-1, x_shape[-2], x_shape[-1]])
+ return tensor
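+
+ # Shape sketch (illustrative): (B, N, all_head_size)
+ # -> (B, N, num_heads, head_size) -> (B, num_heads, N, head_size)
+ # -> (B * num_heads, N, head_size), so heads can be processed as one big batch.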
+
+ def call(
+ self,
+ hidden_states: tf.Tensor,
+ attention_mask: tf.Tensor,
+ query_states: tf.Tensor = None,
+ relative_pos: tf.Tensor = None,
+ rel_embeddings: tf.Tensor = None,
+ output_attentions: bool = False,
+ training: bool = False,
+ ) -> Tuple[tf.Tensor]:
+ """
+ Call the module
+
+ Args:
+ hidden_states (`tf.Tensor`):
+ Input states to the module, usually the output from the previous layer; they will be the Q, K and V in
+ *Attention(Q, K, V)*
+
+ attention_mask (`tf.Tensor`):
+ An attention mask matrix of shape [*B*, *N*, *N*] where *B* is the batch size, *N* is the maximum
+ sequence length in which element [i,j] = *1* means the *i* th token in the input can attend to the *j*
+ th token.
+
+ output_attentions (`bool`, optional):
+ Whether to return the attention matrix.
+
+ query_states (`tf.Tensor`, optional):
+ The *Q* state in *Attention(Q,K,V)*.
+
+ relative_pos (`tf.Tensor`):
+ The relative position encoding between the tokens in the sequence. It's of shape [*B*, *N*, *N*] with
+ values ranging in [*-max_relative_positions*, *max_relative_positions*].
+
+ rel_embeddings (`tf.Tensor`):
+ The embedding of relative distances. It's a tensor of shape [\\(2 \\times
+ \\text{max_relative_positions}\\), *hidden_size*].
+
+
+ """
+ if query_states is None:
+ query_states = hidden_states
+ query_layer = self.transpose_for_scores(self.query_proj(query_states), self.num_attention_heads)
+ key_layer = self.transpose_for_scores(self.key_proj(hidden_states), self.num_attention_heads)
+ value_layer = self.transpose_for_scores(self.value_proj(hidden_states), self.num_attention_heads)
+
+ rel_att = None
+ # Take the dot product between "query" and "key" to get the raw attention scores.
+ scale_factor = 1
+ if "c2p" in self.pos_att_type:
+ scale_factor += 1
+ if "p2c" in self.pos_att_type:
+ scale_factor += 1
+ scale = tf.math.sqrt(tf.cast(shape_list(query_layer)[-1] * scale_factor, tf.float32))
+ attention_scores = tf.matmul(query_layer, tf.transpose(key_layer, [0, 2, 1]) / scale)
+ if self.relative_attention:
+ rel_embeddings = self.pos_dropout(rel_embeddings)
+ rel_att = self.disentangled_att_bias(query_layer, key_layer, relative_pos, rel_embeddings, scale_factor)
+
+ if rel_att is not None:
+ attention_scores = attention_scores + rel_att
+ attention_scores = tf.reshape(
+ attention_scores,
+ (-1, self.num_attention_heads, shape_list(attention_scores)[-2], shape_list(attention_scores)[-1]),
+ )
+
+ # bsz x height x length x dimension
+ attention_probs = self.softmax(attention_scores, attention_mask)
+ attention_probs = self.dropout(attention_probs, training=training)
+ context_layer = tf.matmul(
+ tf.reshape(attention_probs, [-1, shape_list(attention_probs)[-2], shape_list(attention_probs)[-1]]),
+ value_layer,
+ )
+ context_layer = tf.transpose(
+ tf.reshape(
+ context_layer,
+ [-1, self.num_attention_heads, shape_list(context_layer)[-2], shape_list(context_layer)[-1]],
+ ),
+ [0, 2, 1, 3],
+ )
+ # Set the final dimension here explicitly.
+ # Calling tf.reshape(context_layer, (*context_layer_shape[:-2], -1)) raises an error when executing
+ # the model in graph mode as context_layer is reshaped to (None, 7, None) and Dense layer in TFDebertaV2SelfOutput
+ # requires final input dimension to be defined
+ context_layer_shape = shape_list(context_layer)
+ new_context_layer_shape = context_layer_shape[:-2] + [context_layer_shape[-2] * context_layer_shape[-1]]
+ context_layer = tf.reshape(context_layer, new_context_layer_shape)
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
+ return outputs
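+
+ # Scaling note (illustrative): with both "c2p" and "p2c" enabled,
+ # scale_factor == 3 and each of the three score terms (content-to-content,
+ # content-to-position, position-to-content) is divided by sqrt(3 * d), so
+ # their sum keeps roughly unit variance, mirroring the 1/sqrt(d) factor of
+ # standard attention.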
+
+ def disentangled_att_bias(self, query_layer, key_layer, relative_pos, rel_embeddings, scale_factor):
+ if relative_pos is None:
+ q = shape_list(query_layer)[-2]
+ relative_pos = build_relative_position(
+ q,
+ shape_list(key_layer)[-2],
+ bucket_size=self.position_buckets,
+ max_position=self.max_relative_positions,
+ )
+ shape_list_pos = shape_list(relative_pos)
+ if len(shape_list_pos) == 2:
+ relative_pos = tf.expand_dims(tf.expand_dims(relative_pos, 0), 0)
+ elif len(shape_list_pos) == 3:
+ relative_pos = tf.expand_dims(relative_pos, 1)
+ # bsz x height x query x key
+ elif len(shape_list_pos) != 4:
+ raise ValueError(f"Relative position ids must be of dim 2 or 3 or 4. {len(shape_list_pos)}")
+
+ att_span = self.pos_ebd_size
+ rel_embeddings = tf.expand_dims(
+ rel_embeddings[self.pos_ebd_size - att_span : self.pos_ebd_size + att_span, :], 0
+ )
+ if self.share_att_key:
+ pos_query_layer = tf.tile(
+ self.transpose_for_scores(self.query_proj(rel_embeddings), self.num_attention_heads),
+ [shape_list(query_layer)[0] // self.num_attention_heads, 1, 1],
+ )
+ pos_key_layer = tf.tile(
+ self.transpose_for_scores(self.key_proj(rel_embeddings), self.num_attention_heads),
+ [shape_list(query_layer)[0] // self.num_attention_heads, 1, 1],
+ )
+ else:
+ if "c2p" in self.pos_att_type:
+ pos_key_layer = tf.tile(
+ self.transpose_for_scores(self.pos_key_proj(rel_embeddings), self.num_attention_heads),
+ [shape_list(query_layer)[0] // self.num_attention_heads, 1, 1],
+ ) # .split(self.all_head_size, dim=-1)
+ if "p2c" in self.pos_att_type:
+ pos_query_layer = tf.tile(
+ self.transpose_for_scores(self.pos_query_proj(rel_embeddings), self.num_attention_heads),
+ [shape_list(query_layer)[0] // self.num_attention_heads, 1, 1],
+ ) # .split(self.all_head_size, dim=-1)
+
+ score = 0
+ # content->position
+ if "c2p" in self.pos_att_type:
+ scale = tf.math.sqrt(tf.cast(shape_list(pos_key_layer)[-1] * scale_factor, tf.float32))
+ c2p_att = tf.matmul(query_layer, tf.transpose(pos_key_layer, [0, 2, 1]))
+ c2p_pos = tf.clip_by_value(relative_pos + att_span, 0, att_span * 2 - 1)
+ c2p_att = take_along_axis(
+ c2p_att,
+ tf.broadcast_to(
+ tf.squeeze(c2p_pos, 0),
+ [shape_list(query_layer)[0], shape_list(query_layer)[1], shape_list(relative_pos)[-1]],
+ ),
+ )
+ score += c2p_att / scale
+
+ # position->content
+ if "p2c" in self.pos_att_type:
+ scale = tf.math.sqrt(tf.cast(shape_list(pos_query_layer)[-1] * scale_factor, tf.float32))
+ if shape_list(key_layer)[-2] != shape_list(query_layer)[-2]:
+ r_pos = build_relative_position(
+ shape_list(key_layer)[-2],
+ shape_list(key_layer)[-2],
+ bucket_size=self.position_buckets,
+ max_position=self.max_relative_positions,
+ )
+ r_pos = tf.expand_dims(r_pos, 0)
+ else:
+ r_pos = relative_pos
+
+ p2c_pos = tf.clip_by_value(-r_pos + att_span, 0, att_span * 2 - 1)
+
+ p2c_att = tf.matmul(key_layer, tf.transpose(pos_query_layer, [0, 2, 1]))
+ p2c_att = tf.transpose(
+ take_along_axis(
+ p2c_att,
+ tf.broadcast_to(
+ tf.squeeze(p2c_pos, 0),
+ [shape_list(query_layer)[0], shape_list(key_layer)[-2], shape_list(key_layer)[-2]],
+ ),
+ ),
+ [0, 2, 1],
+ )
+ score += p2c_att / scale
+
+ return score
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "query_proj", None) is not None:
+ with tf.name_scope(self.query_proj.name):
+ self.query_proj.build([None, None, self.config.hidden_size])
+ if getattr(self, "key_proj", None) is not None:
+ with tf.name_scope(self.key_proj.name):
+ self.key_proj.build([None, None, self.config.hidden_size])
+ if getattr(self, "value_proj", None) is not None:
+ with tf.name_scope(self.value_proj.name):
+ self.value_proj.build([None, None, self.config.hidden_size])
+ if getattr(self, "dropout", None) is not None:
+ with tf.name_scope(self.dropout.name):
+ self.dropout.build(None)
+ if getattr(self, "pos_dropout", None) is not None:
+ with tf.name_scope(self.pos_dropout.name):
+ self.pos_dropout.build(None)
+ if getattr(self, "pos_key_proj", None) is not None:
+ with tf.name_scope(self.pos_key_proj.name):
+ self.pos_key_proj.build([None, None, self.config.hidden_size])
+ if getattr(self, "pos_query_proj", None) is not None:
+ with tf.name_scope(self.pos_query_proj.name):
+ self.pos_query_proj.build([None, None, self.config.hidden_size])
+
+
+# Copied from transformers.models.deberta.modeling_tf_deberta.TFDebertaEmbeddings with Deberta->DebertaV2
+class TFDebertaV2Embeddings(keras.layers.Layer):
+ """Construct the embeddings from word, position and token_type embeddings."""
+
+ def __init__(self, config, **kwargs):
+ super().__init__(**kwargs)
+
+ self.config = config
+ self.embedding_size = getattr(config, "embedding_size", config.hidden_size)
+ self.hidden_size = config.hidden_size
+ self.max_position_embeddings = config.max_position_embeddings
+ self.position_biased_input = getattr(config, "position_biased_input", True)
+ self.initializer_range = config.initializer_range
+ if self.embedding_size != config.hidden_size:
+ self.embed_proj = keras.layers.Dense(
+ config.hidden_size,
+ kernel_initializer=get_initializer(config.initializer_range),
+ name="embed_proj",
+ use_bias=False,
+ )
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
+ self.dropout = TFDebertaV2StableDropout(config.hidden_dropout_prob, name="dropout")
+
+ def build(self, input_shape=None):
+ with tf.name_scope("word_embeddings"):
+ self.weight = self.add_weight(
+ name="weight",
+ shape=[self.config.vocab_size, self.embedding_size],
+ initializer=get_initializer(self.initializer_range),
+ )
+
+ with tf.name_scope("token_type_embeddings"):
+ if self.config.type_vocab_size > 0:
+ self.token_type_embeddings = self.add_weight(
+ name="embeddings",
+ shape=[self.config.type_vocab_size, self.embedding_size],
+ initializer=get_initializer(self.initializer_range),
+ )
+ else:
+ self.token_type_embeddings = None
+
+ with tf.name_scope("position_embeddings"):
+ if self.position_biased_input:
+ self.position_embeddings = self.add_weight(
+ name="embeddings",
+ shape=[self.max_position_embeddings, self.hidden_size],
+ initializer=get_initializer(self.initializer_range),
+ )
+ else:
+ self.position_embeddings = None
+
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "LayerNorm", None) is not None:
+ with tf.name_scope(self.LayerNorm.name):
+ self.LayerNorm.build([None, None, self.config.hidden_size])
+ if getattr(self, "dropout", None) is not None:
+ with tf.name_scope(self.dropout.name):
+ self.dropout.build(None)
+ if getattr(self, "embed_proj", None) is not None:
+ with tf.name_scope(self.embed_proj.name):
+ self.embed_proj.build([None, None, self.embedding_size])
+
+ def call(
+ self,
+ input_ids: tf.Tensor = None,
+ position_ids: tf.Tensor = None,
+ token_type_ids: tf.Tensor = None,
+ inputs_embeds: tf.Tensor = None,
+ mask: tf.Tensor = None,
+ training: bool = False,
+ ) -> tf.Tensor:
+ """
+ Applies the embedding lookup to the input tensors.
+
+ Returns:
+ final_embeddings (`tf.Tensor`): output embedding tensor.
+ """
+ if input_ids is None and inputs_embeds is None:
+ raise ValueError("Need to provide either `input_ids` or `input_embeds`.")
+
+ if input_ids is not None:
+ check_embeddings_within_bounds(input_ids, self.config.vocab_size)
+ inputs_embeds = tf.gather(params=self.weight, indices=input_ids)
+
+ input_shape = shape_list(inputs_embeds)[:-1]
+
+ if token_type_ids is None:
+ token_type_ids = tf.fill(dims=input_shape, value=0)
+
+ if position_ids is None:
+ position_ids = tf.expand_dims(tf.range(start=0, limit=input_shape[-1]), axis=0)
+
+ final_embeddings = inputs_embeds
+ if self.position_biased_input:
+ position_embeds = tf.gather(params=self.position_embeddings, indices=position_ids)
+ final_embeddings += position_embeds
+ if self.config.type_vocab_size > 0:
+ token_type_embeds = tf.gather(params=self.token_type_embeddings, indices=token_type_ids)
+ final_embeddings += token_type_embeds
+
+ if self.embedding_size != self.hidden_size:
+ final_embeddings = self.embed_proj(final_embeddings)
+
+ final_embeddings = self.LayerNorm(final_embeddings)
+
+ if mask is not None:
+ if len(shape_list(mask)) != len(shape_list(final_embeddings)):
+ if len(shape_list(mask)) == 4:
+ mask = tf.squeeze(tf.squeeze(mask, axis=1), axis=1)
+ mask = tf.cast(tf.expand_dims(mask, axis=2), tf.float32)
+
+ final_embeddings = final_embeddings * mask
+
+ final_embeddings = self.dropout(final_embeddings, training=training)
+
+ return final_embeddings
+
+
+# Copied from transformers.models.deberta.modeling_tf_deberta.TFDebertaPredictionHeadTransform with Deberta->DebertaV2
+class TFDebertaV2PredictionHeadTransform(keras.layers.Layer):
+ def __init__(self, config: DebertaV2Config, **kwargs):
+ super().__init__(**kwargs)
+
+ self.embedding_size = getattr(config, "embedding_size", config.hidden_size)
+
+ self.dense = keras.layers.Dense(
+ units=self.embedding_size,
+ kernel_initializer=get_initializer(config.initializer_range),
+ name="dense",
+ )
+
+ if isinstance(config.hidden_act, str):
+ self.transform_act_fn = get_tf_activation(config.hidden_act)
+ else:
+ self.transform_act_fn = config.hidden_act
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
+ self.config = config
+
+ def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
+ hidden_states = self.dense(inputs=hidden_states)
+ hidden_states = self.transform_act_fn(hidden_states)
+ hidden_states = self.LayerNorm(hidden_states)
+
+ return hidden_states
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "dense", None) is not None:
+ with tf.name_scope(self.dense.name):
+ self.dense.build([None, None, self.config.hidden_size])
+ if getattr(self, "LayerNorm", None) is not None:
+ with tf.name_scope(self.LayerNorm.name):
+ self.LayerNorm.build([None, None, self.embedding_size])
+
+
+# Copied from transformers.models.deberta.modeling_tf_deberta.TFDebertaLMPredictionHead with Deberta->DebertaV2
+class TFDebertaV2LMPredictionHead(keras.layers.Layer):
+ def __init__(self, config: DebertaV2Config, input_embeddings: keras.layers.Layer, **kwargs):
+ super().__init__(**kwargs)
+
+ self.config = config
+ self.embedding_size = getattr(config, "embedding_size", config.hidden_size)
+
+ self.transform = TFDebertaV2PredictionHeadTransform(config, name="transform")
+
+ # The output weights are the same as the input embeddings, but there is
+ # an output-only bias for each token.
+ self.input_embeddings = input_embeddings
+
+ def build(self, input_shape=None):
+ self.bias = self.add_weight(shape=(self.config.vocab_size,), initializer="zeros", trainable=True, name="bias")
+
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "transform", None) is not None:
+ with tf.name_scope(self.transform.name):
+ self.transform.build(None)
+
+ def get_output_embeddings(self) -> keras.layers.Layer:
+ return self.input_embeddings
+
+ def set_output_embeddings(self, value: tf.Variable):
+ self.input_embeddings.weight = value
+ self.input_embeddings.vocab_size = shape_list(value)[0]
+
+ def get_bias(self) -> Dict[str, tf.Variable]:
+ return {"bias": self.bias}
+
+ def set_bias(self, value: tf.Variable):
+ self.bias = value["bias"]
+ self.config.vocab_size = shape_list(value["bias"])[0]
+
+ def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
+ hidden_states = self.transform(hidden_states=hidden_states)
+ seq_length = shape_list(hidden_states)[1]
+ hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, self.embedding_size])
+ hidden_states = tf.matmul(a=hidden_states, b=self.input_embeddings.weight, transpose_b=True)
+ hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, seq_length, self.config.vocab_size])
+ hidden_states = tf.nn.bias_add(value=hidden_states, bias=self.bias)
+
+ return hidden_states
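+
+ # Weight-tying note (illustrative): the projection above reuses the word
+ # embedding matrix (shape [vocab_size, embedding_size]) via transpose_b=True,
+ # so the MLM head adds only the per-token `bias` vector as new parameters.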
+
+
+# Copied from transformers.models.deberta.modeling_tf_deberta.TFDebertaOnlyMLMHead with Deberta->DebertaV2
+class TFDebertaV2OnlyMLMHead(keras.layers.Layer):
+ def __init__(self, config: DebertaV2Config, input_embeddings: keras.layers.Layer, **kwargs):
+ super().__init__(**kwargs)
+ self.predictions = TFDebertaV2LMPredictionHead(config, input_embeddings, name="predictions")
+
+ def call(self, sequence_output: tf.Tensor) -> tf.Tensor:
+ prediction_scores = self.predictions(hidden_states=sequence_output)
+
+ return prediction_scores
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "predictions", None) is not None:
+ with tf.name_scope(self.predictions.name):
+ self.predictions.build(None)
+
+
+# Copied from transformers.models.deberta.modeling_tf_deberta.TFDebertaMainLayer with Deberta->DebertaV2
+class TFDebertaV2MainLayer(keras.layers.Layer):
+ config_class = DebertaV2Config
+
+ def __init__(self, config: DebertaV2Config, **kwargs):
+ super().__init__(**kwargs)
+
+ self.config = config
+
+ self.embeddings = TFDebertaV2Embeddings(config, name="embeddings")
+ self.encoder = TFDebertaV2Encoder(config, name="encoder")
+
+ def get_input_embeddings(self) -> keras.layers.Layer:
+ return self.embeddings
+
+ def set_input_embeddings(self, value: tf.Variable):
+ self.embeddings.weight = value
+ self.embeddings.vocab_size = shape_list(value)[0]
+
+ def _prune_heads(self, heads_to_prune):
+ """
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
+ class PreTrainedModel
+ """
+ raise NotImplementedError
+
+ @unpack_inputs
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
+ position_ids: np.ndarray | tf.Tensor | None = None,
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ training: bool = False,
+ ) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]:
+ if input_ids is not None and inputs_embeds is not None:
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
+ elif input_ids is not None:
+ input_shape = shape_list(input_ids)
+ elif inputs_embeds is not None:
+ input_shape = shape_list(inputs_embeds)[:-1]
+ else:
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
+
+ if attention_mask is None:
+ attention_mask = tf.fill(dims=input_shape, value=1)
+
+ if token_type_ids is None:
+ token_type_ids = tf.fill(dims=input_shape, value=0)
+
+ embedding_output = self.embeddings(
+ input_ids=input_ids,
+ position_ids=position_ids,
+ token_type_ids=token_type_ids,
+ inputs_embeds=inputs_embeds,
+ mask=attention_mask,
+ training=training,
+ )
+
+ encoder_outputs = self.encoder(
+ hidden_states=embedding_output,
+ attention_mask=attention_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+
+ sequence_output = encoder_outputs[0]
+
+ if not return_dict:
+ return (sequence_output,) + encoder_outputs[1:]
+
+ return TFBaseModelOutput(
+ last_hidden_state=sequence_output,
+ hidden_states=encoder_outputs.hidden_states,
+ attentions=encoder_outputs.attentions,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "embeddings", None) is not None:
+ with tf.name_scope(self.embeddings.name):
+ self.embeddings.build(None)
+ if getattr(self, "encoder", None) is not None:
+ with tf.name_scope(self.encoder.name):
+ self.encoder.build(None)
+
+
+# Copied from transformers.models.deberta.modeling_tf_deberta.TFDebertaPreTrainedModel with Deberta->DebertaV2
+class TFDebertaV2PreTrainedModel(TFPreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = DebertaV2Config
+ base_model_prefix = "deberta"
+
+
+DEBERTA_START_DOCSTRING = r"""
+ The DeBERTa model was proposed in [DeBERTa: Decoding-enhanced BERT with Disentangled
+ Attention](https://arxiv.org/abs/2006.03654) by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen. It's built
+ on top of BERT/RoBERTa with two improvements, i.e. disentangled attention and an enhanced mask decoder. With those
+ two improvements, it outperforms BERT/RoBERTa on a majority of tasks with 80GB of pretraining data.
+
+ This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
+ as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and
+ behavior.
+
+
+
+ TensorFlow models and layers in `transformers` accept two formats as input:
+
+ - having all inputs as keyword arguments (like PyTorch models), or
+ - having all inputs as a list, tuple or dict in the first positional argument.
+
+ The reason the second format is supported is that Keras methods prefer this format when passing inputs to models
+ and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just
+ pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second
+ format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with
+ the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first
+ positional argument:
+
+ - a single Tensor with `input_ids` only and nothing else: `model(input_ids)`
+ - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
+ `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
+ - a dictionary with one or several input Tensors associated to the input names given in the docstring:
+ `model({"input_ids": input_ids, "token_type_ids": token_type_ids})`
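+
+ For example (an illustrative sketch; the tensor names here are placeholders):
+
+ ```python
+ model(input_ids)  # a single tensor
+ model([input_ids, attention_mask])  # a list, in docstring order
+ model({"input_ids": input_ids, "attention_mask": attention_mask})  # a dict keyed by input name
+ ```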
+
+ Note that when creating models and layers with
+ [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry
+ about any of this, as you can just pass inputs like you would to any other Python function!
+
+
+
+ Parameters:
+ config ([`DebertaV2Config`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+DEBERTA_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`np.ndarray`, `tf.Tensor`, `List[tf.Tensor]`, `Dict[str, tf.Tensor]` or `Dict[str, np.ndarray]` and each example must have the shape `({0})`):
+ Indices of input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ token_type_ids (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
+ 1]`:
+
+ - 0 corresponds to a *sentence A* token,
+ - 1 corresponds to a *sentence B* token.
+
+ [What are token type IDs?](../glossary#token-type-ids)
+ position_ids (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
+ config.max_position_embeddings - 1]`.
+
+ [What are position IDs?](../glossary#position-ids)
+ inputs_embeds (`np.ndarray` or `tf.Tensor` of shape `({0}, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+ is useful if you want more control over how to convert *input_ids* indices into associated vectors than the
+ model's internal embedding lookup matrix.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+@add_start_docstrings(
+ "The bare DeBERTa Model transformer outputting raw hidden-states without any specific head on top.",
+ DEBERTA_START_DOCSTRING,
+)
+# Copied from transformers.models.deberta.modeling_tf_deberta.TFDebertaModel with Deberta->DebertaV2
+class TFDebertaV2Model(TFDebertaV2PreTrainedModel):
+ def __init__(self, config: DebertaV2Config, *inputs, **kwargs):
+ super().__init__(config, *inputs, **kwargs)
+
+ self.deberta = TFDebertaV2MainLayer(config, name="deberta")
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=TFBaseModelOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
+ position_ids: np.ndarray | tf.Tensor | None = None,
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ training: Optional[bool] = False,
+ ) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]:
+ outputs = self.deberta(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+
+ return outputs
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "deberta", None) is not None:
+ with tf.name_scope(self.deberta.name):
+ self.deberta.build(None)
+
+
+@add_start_docstrings("""DeBERTa Model with a `language modeling` head on top.""", DEBERTA_START_DOCSTRING)
+# Copied from transformers.models.deberta.modeling_tf_deberta.TFDebertaForMaskedLM with Deberta->DebertaV2
+class TFDebertaV2ForMaskedLM(TFDebertaV2PreTrainedModel, TFMaskedLanguageModelingLoss):
+ def __init__(self, config: DebertaV2Config, *inputs, **kwargs):
+ super().__init__(config, *inputs, **kwargs)
+
+ if config.is_decoder:
+ logger.warning(
+ "If you want to use `TFDebertaV2ForMaskedLM` make sure `config.is_decoder=False` for "
+ "bi-directional self-attention."
+ )
+
+ self.deberta = TFDebertaV2MainLayer(config, name="deberta")
+ self.mlm = TFDebertaV2OnlyMLMHead(config, input_embeddings=self.deberta.embeddings, name="cls")
+
+ def get_lm_head(self) -> keras.layers.Layer:
+ return self.mlm.predictions
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=TFMaskedLMOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
+ position_ids: np.ndarray | tf.Tensor | None = None,
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ labels: np.ndarray | tf.Tensor | None = None,
+ training: Optional[bool] = False,
+ ) -> Union[TFMaskedLMOutput, Tuple[tf.Tensor]]:
+ r"""
+ labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
+ config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
+ loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
+ """
+ outputs = self.deberta(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+ sequence_output = outputs[0]
+ prediction_scores = self.mlm(sequence_output=sequence_output, training=training)
+ loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=prediction_scores)
+
+ if not return_dict:
+ output = (prediction_scores,) + outputs[2:]
+ return ((loss,) + output) if loss is not None else output
+
+ return TFMaskedLMOutput(
+ loss=loss,
+ logits=prediction_scores,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "deberta", None) is not None:
+ with tf.name_scope(self.deberta.name):
+ self.deberta.build(None)
+ if getattr(self, "mlm", None) is not None:
+ with tf.name_scope(self.mlm.name):
+ self.mlm.build(None)
+
+
+@add_start_docstrings(
+ """
+ DeBERTa Model transformer with a sequence classification/regression head on top (a linear layer on top of the
+ pooled output) e.g. for GLUE tasks.
+ """,
+ DEBERTA_START_DOCSTRING,
+)
+# Copied from transformers.models.deberta.modeling_tf_deberta.TFDebertaForSequenceClassification with Deberta->DebertaV2
+class TFDebertaV2ForSequenceClassification(TFDebertaV2PreTrainedModel, TFSequenceClassificationLoss):
+ def __init__(self, config: DebertaV2Config, *inputs, **kwargs):
+ super().__init__(config, *inputs, **kwargs)
+
+ self.num_labels = config.num_labels
+
+ self.deberta = TFDebertaV2MainLayer(config, name="deberta")
+ self.pooler = TFDebertaV2ContextPooler(config, name="pooler")
+
+ drop_out = getattr(config, "cls_dropout", None)
+ drop_out = self.config.hidden_dropout_prob if drop_out is None else drop_out
+ self.dropout = TFDebertaV2StableDropout(drop_out, name="cls_dropout")
+ self.classifier = keras.layers.Dense(
+ units=config.num_labels,
+ kernel_initializer=get_initializer(config.initializer_range),
+ name="classifier",
+ )
+ self.output_dim = self.pooler.output_dim
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=TFSequenceClassifierOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
+ position_ids: np.ndarray | tf.Tensor | None = None,
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ labels: np.ndarray | tf.Tensor | None = None,
+ training: Optional[bool] = False,
+ ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
+ r"""
+ labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size,)`, *optional*):
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
+            config.num_labels - 1]`. If `config.num_labels == 1`, a regression loss is computed (Mean-Square loss); if
+            `config.num_labels > 1`, a classification loss is computed (Cross-Entropy).
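+
+        Example (illustrative; the classification head is randomly initialized, so the scores are only
+        meaningful after fine-tuning):
+
+        ```python
+        >>> from transformers import AutoTokenizer, TFDebertaV2ForSequenceClassification
+
+        >>> tokenizer = AutoTokenizer.from_pretrained("microsoft/deberta-v3-base")
+        >>> model = TFDebertaV2ForSequenceClassification.from_pretrained("microsoft/deberta-v3-base", num_labels=2)
+
+        >>> inputs = tokenizer("A quietly moving little film.", return_tensors="tf")
+        >>> logits = model(**inputs).logits  # shape (1, 2): one score per label
+        ```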
+ """
+ outputs = self.deberta(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+ sequence_output = outputs[0]
+ pooled_output = self.pooler(sequence_output, training=training)
+ pooled_output = self.dropout(pooled_output, training=training)
+ logits = self.classifier(pooled_output)
+ loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)
+
+ if not return_dict:
+ output = (logits,) + outputs[1:]
+
+ return ((loss,) + output) if loss is not None else output
+
+ return TFSequenceClassifierOutput(
+ loss=loss,
+ logits=logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "deberta", None) is not None:
+ with tf.name_scope(self.deberta.name):
+ self.deberta.build(None)
+ if getattr(self, "pooler", None) is not None:
+ with tf.name_scope(self.pooler.name):
+ self.pooler.build(None)
+ if getattr(self, "dropout", None) is not None:
+ with tf.name_scope(self.dropout.name):
+ self.dropout.build(None)
+ if getattr(self, "classifier", None) is not None:
+ with tf.name_scope(self.classifier.name):
+ self.classifier.build([None, None, self.output_dim])
+
+
+@add_start_docstrings(
+ """
+ DeBERTa Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
+ Named-Entity-Recognition (NER) tasks.
+ """,
+ DEBERTA_START_DOCSTRING,
+)
+# Copied from transformers.models.deberta.modeling_tf_deberta.TFDebertaForTokenClassification with Deberta->DebertaV2
+class TFDebertaV2ForTokenClassification(TFDebertaV2PreTrainedModel, TFTokenClassificationLoss):
+ def __init__(self, config: DebertaV2Config, *inputs, **kwargs):
+ super().__init__(config, *inputs, **kwargs)
+
+ self.num_labels = config.num_labels
+
+ self.deberta = TFDebertaV2MainLayer(config, name="deberta")
+ self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob)
+ self.classifier = keras.layers.Dense(
+ units=config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
+ )
+ self.config = config
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=TFTokenClassifierOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
+ position_ids: np.ndarray | tf.Tensor | None = None,
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ labels: np.ndarray | tf.Tensor | None = None,
+ training: Optional[bool] = False,
+ ) -> Union[TFTokenClassifierOutput, Tuple[tf.Tensor]]:
+ r"""
+ labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
+ """
+ outputs = self.deberta(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+ sequence_output = outputs[0]
+ sequence_output = self.dropout(sequence_output, training=training)
+ logits = self.classifier(inputs=sequence_output)
+ loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)
+
+ if not return_dict:
+ output = (logits,) + outputs[1:]
+ return ((loss,) + output) if loss is not None else output
+
+ return TFTokenClassifierOutput(
+ loss=loss,
+ logits=logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "deberta", None) is not None:
+ with tf.name_scope(self.deberta.name):
+ self.deberta.build(None)
+ if getattr(self, "classifier", None) is not None:
+ with tf.name_scope(self.classifier.name):
+ self.classifier.build([None, None, self.config.hidden_size])
+
+
+@add_start_docstrings(
+ """
+ DeBERTa Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
+ layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
+ """,
+ DEBERTA_START_DOCSTRING,
+)
+# Copied from transformers.models.deberta.modeling_tf_deberta.TFDebertaForQuestionAnswering with Deberta->DebertaV2
+class TFDebertaV2ForQuestionAnswering(TFDebertaV2PreTrainedModel, TFQuestionAnsweringLoss):
+ def __init__(self, config: DebertaV2Config, *inputs, **kwargs):
+ super().__init__(config, *inputs, **kwargs)
+
+ self.num_labels = config.num_labels
+
+ self.deberta = TFDebertaV2MainLayer(config, name="deberta")
+ self.qa_outputs = keras.layers.Dense(
+ units=config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="qa_outputs"
+ )
+ self.config = config
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=TFQuestionAnsweringModelOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
+ position_ids: np.ndarray | tf.Tensor | None = None,
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ start_positions: np.ndarray | tf.Tensor | None = None,
+ end_positions: np.ndarray | tf.Tensor | None = None,
+ training: Optional[bool] = False,
+ ) -> Union[TFQuestionAnsweringModelOutput, Tuple[tf.Tensor]]:
+ r"""
+ start_positions (`tf.Tensor` or `np.ndarray` of shape `(batch_size,)`, *optional*):
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
+            Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the
+            sequence are not taken into account for computing the loss.
+        end_positions (`tf.Tensor` or `np.ndarray` of shape `(batch_size,)`, *optional*):
+            Labels for position (index) of the end of the labelled span for computing the token classification loss.
+            Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the
+            sequence are not taken into account for computing the loss.
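+
+        Example (an illustrative sketch; with an un-fine-tuned checkpoint the extracted span is arbitrary):
+
+        ```python
+        >>> import tensorflow as tf
+        >>> from transformers import AutoTokenizer, TFDebertaV2ForQuestionAnswering
+
+        >>> tokenizer = AutoTokenizer.from_pretrained("microsoft/deberta-v3-base")
+        >>> model = TFDebertaV2ForQuestionAnswering.from_pretrained("microsoft/deberta-v3-base")
+
+        >>> inputs = tokenizer("Who wrote Hamlet?", "Hamlet was written by Shakespeare.", return_tensors="tf")
+        >>> outputs = model(**inputs)
+        >>> start = int(tf.argmax(outputs.start_logits, axis=-1)[0])
+        >>> end = int(tf.argmax(outputs.end_logits, axis=-1)[0])
+        >>> answer = tokenizer.decode(inputs.input_ids[0, start : end + 1])  # doctest: +SKIP
+        ```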
+ """
+ outputs = self.deberta(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+ sequence_output = outputs[0]
+ logits = self.qa_outputs(inputs=sequence_output)
+ start_logits, end_logits = tf.split(value=logits, num_or_size_splits=2, axis=-1)
+ start_logits = tf.squeeze(input=start_logits, axis=-1)
+ end_logits = tf.squeeze(input=end_logits, axis=-1)
+ loss = None
+
+ if start_positions is not None and end_positions is not None:
+ labels = {"start_position": start_positions}
+ labels["end_position"] = end_positions
+ loss = self.hf_compute_loss(labels=labels, logits=(start_logits, end_logits))
+
+ if not return_dict:
+ output = (start_logits, end_logits) + outputs[2:]
+ return ((loss,) + output) if loss is not None else output
+
+ return TFQuestionAnsweringModelOutput(
+ loss=loss,
+ start_logits=start_logits,
+ end_logits=end_logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "deberta", None) is not None:
+ with tf.name_scope(self.deberta.name):
+ self.deberta.build(None)
+ if getattr(self, "qa_outputs", None) is not None:
+ with tf.name_scope(self.qa_outputs.name):
+ self.qa_outputs.build([None, None, self.config.hidden_size])
+
+
+@add_start_docstrings(
+ """
+ DeBERTa Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
+ softmax) e.g. for RocStories/SWAG tasks.
+ """,
+ DEBERTA_START_DOCSTRING,
+)
+class TFDebertaV2ForMultipleChoice(TFDebertaV2PreTrainedModel, TFMultipleChoiceLoss):
+    # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
+ # _keys_to_ignore_on_load_unexpected = [r"mlm___cls", r"nsp___cls", r"cls.predictions", r"cls.seq_relationship"]
+ # _keys_to_ignore_on_load_missing = [r"dropout"]
+
+ def __init__(self, config: DebertaV2Config, *inputs, **kwargs):
+ super().__init__(config, *inputs, **kwargs)
+
+ self.deberta = TFDebertaV2MainLayer(config, name="deberta")
+ self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob)
+ self.pooler = TFDebertaV2ContextPooler(config, name="pooler")
+ self.classifier = keras.layers.Dense(
+ units=1, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
+ )
+ self.output_dim = self.pooler.output_dim
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=TFMultipleChoiceModelOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
+ position_ids: np.ndarray | tf.Tensor | None = None,
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ labels: np.ndarray | tf.Tensor | None = None,
+ training: Optional[bool] = False,
+ ) -> Union[TFMultipleChoiceModelOutput, Tuple[tf.Tensor]]:
+ r"""
+ labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size,)`, *optional*):
+ Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices]`
+ where `num_choices` is the size of the second dimension of the input tensors. (See `input_ids` above)
+ """
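+        # Inputs arrive as (batch_size, num_choices, seq_length); the encoder only accepts rank-2
+        # token tensors, so every input is flattened to (batch_size * num_choices, seq_length) below
+        # and the per-choice logits are reshaped back to (batch_size, num_choices) at the end.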
+ if input_ids is not None:
+ num_choices = shape_list(input_ids)[1]
+ seq_length = shape_list(input_ids)[2]
+ else:
+ num_choices = shape_list(inputs_embeds)[1]
+ seq_length = shape_list(inputs_embeds)[2]
+
+ flat_input_ids = tf.reshape(tensor=input_ids, shape=(-1, seq_length)) if input_ids is not None else None
+ flat_attention_mask = (
+ tf.reshape(tensor=attention_mask, shape=(-1, seq_length)) if attention_mask is not None else None
+ )
+ flat_token_type_ids = (
+ tf.reshape(tensor=token_type_ids, shape=(-1, seq_length)) if token_type_ids is not None else None
+ )
+ flat_position_ids = (
+ tf.reshape(tensor=position_ids, shape=(-1, seq_length)) if position_ids is not None else None
+ )
+ flat_inputs_embeds = (
+ tf.reshape(tensor=inputs_embeds, shape=(-1, seq_length, shape_list(inputs_embeds)[3]))
+ if inputs_embeds is not None
+ else None
+ )
+ outputs = self.deberta(
+ input_ids=flat_input_ids,
+ attention_mask=flat_attention_mask,
+ token_type_ids=flat_token_type_ids,
+ position_ids=flat_position_ids,
+ inputs_embeds=flat_inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+ sequence_output = outputs[0]
+ pooled_output = self.pooler(sequence_output, training=training)
+ pooled_output = self.dropout(pooled_output, training=training)
+ logits = self.classifier(pooled_output)
+ reshaped_logits = tf.reshape(tensor=logits, shape=(-1, num_choices))
+ loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=reshaped_logits)
+
+ if not return_dict:
+ output = (reshaped_logits,) + outputs[2:]
+ return ((loss,) + output) if loss is not None else output
+
+ return TFMultipleChoiceModelOutput(
+ loss=loss,
+ logits=reshaped_logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "deberta", None) is not None:
+ with tf.name_scope(self.deberta.name):
+ self.deberta.build(None)
+ if getattr(self, "pooler", None) is not None:
+ with tf.name_scope(self.pooler.name):
+ self.pooler.build(None)
+ if getattr(self, "classifier", None) is not None:
+ with tf.name_scope(self.classifier.name):
+ self.classifier.build([None, None, self.output_dim])
diff --git a/venv/lib/python3.10/site-packages/transformers/models/deberta_v2/tokenization_deberta_v2.py b/venv/lib/python3.10/site-packages/transformers/models/deberta_v2/tokenization_deberta_v2.py
new file mode 100644
index 0000000000000000000000000000000000000000..a92103945416d78c24d7f77b541eac9c4d6b62e6
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/deberta_v2/tokenization_deberta_v2.py
@@ -0,0 +1,521 @@
+# coding=utf-8
+# Copyright 2020 Microsoft and the HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" Tokenization class for model DeBERTa."""
+
+import os
+import unicodedata
+from typing import Any, Dict, List, Optional, Tuple
+
+import sentencepiece as sp
+
+from ...tokenization_utils import AddedToken, PreTrainedTokenizer
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+
+VOCAB_FILES_NAMES = {"vocab_file": "spm.model"}
+
+
+class DebertaV2Tokenizer(PreTrainedTokenizer):
+ r"""
+ Constructs a DeBERTa-v2 tokenizer. Based on [SentencePiece](https://github.com/google/sentencepiece).
+
+ Args:
+ vocab_file (`str`):
+ [SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that
+ contains the vocabulary necessary to instantiate a tokenizer.
+ do_lower_case (`bool`, *optional*, defaults to `False`):
+ Whether or not to lowercase the input when tokenizing.
+ bos_token (`string`, *optional*, defaults to `"[CLS]"`):
+            The beginning of sequence token that was used during pre-training. Can be used as a sequence classifier token.
+ When building a sequence using special tokens, this is not the token that is used for the beginning of
+ sequence. The token used is the `cls_token`.
+ eos_token (`string`, *optional*, defaults to `"[SEP]"`):
+ The end of sequence token. When building a sequence using special tokens, this is not the token that is
+ used for the end of sequence. The token used is the `sep_token`.
+ unk_token (`str`, *optional*, defaults to `"[UNK]"`):
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
+ token instead.
+ sep_token (`str`, *optional*, defaults to `"[SEP]"`):
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
+ sequence classification or for a text and a question for question answering. It is also used as the last
+ token of a sequence built with special tokens.
+ pad_token (`str`, *optional*, defaults to `"[PAD]"`):
+ The token used for padding, for example when batching sequences of different lengths.
+ cls_token (`str`, *optional*, defaults to `"[CLS]"`):
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
+ mask_token (`str`, *optional*, defaults to `"[MASK]"`):
+ The token used for masking values. This is the token used when training this model with masked language
+ modeling. This is the token which the model will try to predict.
+ sp_model_kwargs (`dict`, *optional*):
+ Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for
+ SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things,
+ to set:
+
+ - `enable_sampling`: Enable subword regularization.
+ - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.
+
+ - `nbest_size = {0,1}`: No sampling is performed.
+ - `nbest_size > 1`: samples from the nbest_size results.
+            - `nbest_size < 0`: assumes that `nbest_size` is infinite and samples from all hypotheses (lattice)
+              using the forward-filtering-and-backward-sampling algorithm.
+
+ - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
+ BPE-dropout.
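+
+    Example (illustrative; the exact pieces depend on the SentencePiece model shipped with the checkpoint):
+
+    ```python
+    >>> from transformers import DebertaV2Tokenizer
+
+    >>> tokenizer = DebertaV2Tokenizer.from_pretrained("microsoft/deberta-v3-base")
+    >>> tokenizer.tokenize("Hello world!")  # doctest: +SKIP
+    ['▁Hello', '▁world', '!']
+    ```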
+ """
+
+ vocab_files_names = VOCAB_FILES_NAMES
+
+ def __init__(
+ self,
+ vocab_file,
+ do_lower_case=False,
+ split_by_punct=False,
+ bos_token="[CLS]",
+ eos_token="[SEP]",
+ unk_token="[UNK]",
+ sep_token="[SEP]",
+ pad_token="[PAD]",
+ cls_token="[CLS]",
+ mask_token="[MASK]",
+ sp_model_kwargs: Optional[Dict[str, Any]] = None,
+ **kwargs,
+ ) -> None:
+ self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
+
+ if not os.path.isfile(vocab_file):
+ raise ValueError(
+ f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
+ " model use `tokenizer = AutoTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
+ )
+ self.do_lower_case = do_lower_case
+ self.split_by_punct = split_by_punct
+ self.vocab_file = vocab_file
+ self._tokenizer = SPMTokenizer(
+ vocab_file, None, split_by_punct=split_by_punct, sp_model_kwargs=self.sp_model_kwargs
+ )
+ unk_token = AddedToken(unk_token, normalized=True, special=True) if isinstance(unk_token, str) else unk_token
+ super().__init__(
+ do_lower_case=do_lower_case,
+ bos_token=bos_token,
+ eos_token=eos_token,
+ unk_token=unk_token,
+ sep_token=sep_token,
+ pad_token=pad_token,
+ cls_token=cls_token,
+ mask_token=mask_token,
+ split_by_punct=split_by_punct,
+ sp_model_kwargs=self.sp_model_kwargs,
+ **kwargs,
+ )
+ self._tokenizer.special_tokens = self.all_special_tokens
+
+ @property
+ def vocab_size(self):
+ return len(self.vocab)
+
+ @property
+ def vocab(self):
+ return self._tokenizer.vocab
+
+ def get_vocab(self):
+ vocab = self.vocab.copy()
+ vocab.update(self.get_added_vocab())
+ return vocab
+
+ def _tokenize(self, text: str) -> List[str]:
+ """Take as input a string and return a list of strings (tokens) for words/sub-words"""
+ if self.do_lower_case:
+ text = text.lower()
+ return self._tokenizer.tokenize(text)
+
+ def _convert_token_to_id(self, token):
+ """Converts a token (str) in an id using the vocab."""
+ return self._tokenizer.spm.PieceToId(token)
+
+ def _convert_id_to_token(self, index):
+ """Converts an index (integer) in a token (str) using the vocab."""
+ return self._tokenizer.spm.IdToPiece(index) if index < self.vocab_size else self.unk_token
+
+ def convert_tokens_to_string(self, tokens):
+ """Converts a sequence of tokens (string) in a single string."""
+ return self._tokenizer.decode(tokens)
+
+ def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
+ """
+        Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
+ adding special tokens. A DeBERTa sequence has the following format:
+
+ - single sequence: [CLS] X [SEP]
+ - pair of sequences: [CLS] A [SEP] B [SEP]
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs to which the special tokens will be added.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
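+
+        Example (illustrative; ids 1 and 2 are the [CLS]/[SEP] ids of the standard DeBERTa-v2
+        SentencePiece vocabulary, see the commented token map in `SPMTokenizer`):
+
+        ```python
+        >>> tokenizer.build_inputs_with_special_tokens([5, 6], [7, 8])  # doctest: +SKIP
+        [1, 5, 6, 2, 7, 8, 2]
+        ```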
+ """
+
+ if token_ids_1 is None:
+ return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
+ cls = [self.cls_token_id]
+ sep = [self.sep_token_id]
+ return cls + token_ids_0 + sep + token_ids_1 + sep
+
+ def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
+ """
+ Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding
+ special tokens using the tokenizer `prepare_for_model` or `encode_plus` methods.
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
+ Whether or not the token list is already formatted with special tokens for the model.
+
+ Returns:
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
+ """
+
+ if already_has_special_tokens:
+ return super().get_special_tokens_mask(
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
+ )
+
+ if token_ids_1 is not None:
+ return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
+ return [1] + ([0] * len(token_ids_0)) + [1]
+
+ def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
+ """
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. A DeBERTa
+ sequence pair mask has the following format:
+
+ ```
+ 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
+ | first sequence | second sequence |
+ ```
+
+ If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
+ """
+ sep = [self.sep_token_id]
+ cls = [self.cls_token_id]
+ if token_ids_1 is None:
+ return len(cls + token_ids_0 + sep) * [0]
+ return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
+
+ def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
+ add_prefix_space = kwargs.pop("add_prefix_space", False)
+ if is_split_into_words or add_prefix_space:
+ text = " " + text
+ return (text, kwargs)
+
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
+ return self._tokenizer.save_pretrained(save_directory, filename_prefix=filename_prefix)
+
+
+class SPMTokenizer:
+ r"""
+ Constructs a tokenizer based on [SentencePiece](https://github.com/google/sentencepiece).
+
+ Args:
+ vocab_file (`str`):
+ [SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that
+ contains the vocabulary necessary to instantiate a tokenizer.
+ sp_model_kwargs (`dict`, *optional*):
+ Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for
+ SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things,
+ to set:
+
+ - `enable_sampling`: Enable subword regularization.
+ - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.
+
+ - `nbest_size = {0,1}`: No sampling is performed.
+ - `nbest_size > 1`: samples from the nbest_size results.
+            - `nbest_size < 0`: assumes that `nbest_size` is infinite and samples from all hypotheses (lattice)
+              using the forward-filtering-and-backward-sampling algorithm.
+
+ - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
+ BPE-dropout.
+ """
+
+ def __init__(
+ self, vocab_file, special_tokens, split_by_punct=False, sp_model_kwargs: Optional[Dict[str, Any]] = None
+ ):
+ self.split_by_punct = split_by_punct
+ self.vocab_file = vocab_file
+ self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
+ spm = sp.SentencePieceProcessor(**self.sp_model_kwargs)
+ if not os.path.exists(vocab_file):
+ raise FileNotFoundError(f"{vocab_file} does not exist!")
+ spm.load(vocab_file)
+ bpe_vocab_size = spm.GetPieceSize()
+        # Token map
+        # <unk> 0+1
+        # <s> 1+1
+        # </s> 2+1
+ self.vocab = {spm.IdToPiece(i): i for i in range(bpe_vocab_size)}
+ self.ids_to_tokens = [spm.IdToPiece(i) for i in range(bpe_vocab_size)]
+ # self.vocab['[PAD]'] = 0
+ # self.vocab['[CLS]'] = 1
+ # self.vocab['[SEP]'] = 2
+ # self.vocab['[UNK]'] = 3
+
+ self.spm = spm
+ self.special_tokens = special_tokens
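+        # NOTE: `special_tokens` may be None here; `DebertaV2Tokenizer` assigns its
+        # `all_special_tokens` list to this attribute right after constructing the SPMTokenizer.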
+
+ def __getstate__(self):
+ state = self.__dict__.copy()
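+        # the SentencePieceProcessor handle is not picklable; drop it here and
+        # rebuild it from `vocab_file` in `__setstate__`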
+ state["spm"] = None
+ return state
+
+ def __setstate__(self, d):
+ self.__dict__ = d
+
+ # for backward compatibility
+ if not hasattr(self, "sp_model_kwargs"):
+ self.sp_model_kwargs = {}
+
+ self.spm = sp.SentencePieceProcessor(**self.sp_model_kwargs)
+ self.spm.Load(self.vocab_file)
+
+ def tokenize(self, text):
+ return self._encode_as_pieces(text)
+
+ def convert_ids_to_tokens(self, ids):
+ tokens = []
+ for i in ids:
+ tokens.append(self.ids_to_tokens[i])
+ return tokens
+
+ def decode(self, tokens, start=-1, end=-1, raw_text=None):
+ if raw_text is None:
+ current_sub_tokens = []
+ out_string = ""
+ prev_is_special = False
+ for token in tokens:
+ # make sure that special tokens are not decoded using sentencepiece model
+ if token in self.special_tokens:
+ if not prev_is_special:
+ out_string += " "
+ out_string += self.spm.decode_pieces(current_sub_tokens) + token
+ prev_is_special = True
+ current_sub_tokens = []
+ else:
+ current_sub_tokens.append(token)
+ prev_is_special = False
+ out_string += self.spm.decode_pieces(current_sub_tokens)
+ return out_string.strip()
+ else:
+ words = self.split_to_words(raw_text)
+ word_tokens = [self.tokenize(w) for w in words]
+ token2words = [0] * len(tokens)
+ tid = 0
+ for i, w in enumerate(word_tokens):
+ for k, t in enumerate(w):
+ token2words[tid] = i
+ tid += 1
+ word_start = token2words[start]
+ word_end = token2words[end] if end < len(tokens) else len(words)
+ text = "".join(words[word_start:word_end])
+ return text
+
+ # TODO add a deprecation cycle as this can have different behaviour from our API
+ def add_special_token(self, token):
+ if token not in self.special_tokens:
+ self.special_tokens.append(token)
+ if token not in self.vocab:
+ self.vocab[token] = len(self.vocab) - 1
+ self.ids_to_tokens.append(token)
+ return self.id(token)
+
+ def part_of_whole_word(self, token, is_bos=False):
+ logger.warning_once(
+ "The `DebertaTokenizer.part_of_whole_word` method is deprecated and will be removed in `transformers==4.35`"
+ )
+ if is_bos:
+ return True
+ if (
+ len(token) == 1
+ and (_is_whitespace(list(token)[0]) or _is_control(list(token)[0]) or _is_punctuation(list(token)[0]))
+ ) or token in self.special_tokens:
+ return False
+
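+        # b"\xe2\x96\x81" decodes to "▁" (U+2581), the marker SentencePiece prepends to
+        # word-initial pieces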
+ word_start = b"\xe2\x96\x81".decode("utf-8")
+ return not token.startswith(word_start)
+
+ def pad(self):
+ return "[PAD]"
+
+ def bos(self):
+ return "[CLS]"
+
+ def eos(self):
+ return "[SEP]"
+
+ def unk(self):
+ return "[UNK]"
+
+ def mask(self):
+ return "[MASK]"
+
+ def sym(self, id):
+ return self.ids_to_tokens[id]
+
+ def id(self, sym):
+ logger.warning_once(
+ "The `DebertaTokenizer.id` method is deprecated and will be removed in `transformers==4.35`"
+ )
+ return self.vocab[sym] if sym in self.vocab else 1
+
+ def _encode_as_pieces(self, text):
+ text = convert_to_unicode(text)
+ if self.split_by_punct:
+ words = self._run_split_on_punc(text)
+ pieces = [self.spm.encode(w, out_type=str) for w in words]
+ return [p for w in pieces for p in w]
+ else:
+ return self.spm.encode(text, out_type=str)
+
+ def split_to_words(self, text):
+ pieces = self._encode_as_pieces(text)
+ word_start = b"\xe2\x96\x81".decode("utf-8")
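+        # walk the SentencePiece pieces and map them back to character offsets in `text`,
+        # so `decode(..., raw_text=...)` can return exact substrings of the original input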
+ words = []
+ offset = 0
+ prev_end = 0
+ for i, p in enumerate(pieces):
+ if p.startswith(word_start):
+ if offset > prev_end:
+ words.append(text[prev_end:offset])
+ prev_end = offset
+ w = p.replace(word_start, "")
+ else:
+ w = p
+ try:
+ s = text.index(w, offset)
+ pn = ""
+ k = i + 1
+ while k < len(pieces):
+ pn = pieces[k].replace(word_start, "")
+ if len(pn) > 0:
+ break
+ k += 1
+
+ if len(pn) > 0 and pn in text[offset:s]:
+ offset = offset + 1
+ else:
+ offset = s + len(w)
+ except Exception:
+ offset = offset + 1
+
+ if prev_end < offset:
+ words.append(text[prev_end:offset])
+
+ return words
+
+ def _run_split_on_punc(self, text):
+ """Splits punctuation on a piece of text."""
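+        # e.g. "hello,world!" -> ["hello", ",", "world", "!"]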
+ chars = list(text)
+ i = 0
+ start_new_word = True
+ output = []
+ while i < len(chars):
+ char = chars[i]
+ if _is_punctuation(char):
+ output.append([char])
+ start_new_word = True
+ else:
+ if start_new_word:
+ output.append([])
+ start_new_word = False
+ output[-1].append(char)
+ i += 1
+
+ return ["".join(x) for x in output]
+
+ def save_pretrained(self, path: str, filename_prefix: str = None):
+ filename = VOCAB_FILES_NAMES[list(VOCAB_FILES_NAMES.keys())[0]]
+ if filename_prefix is not None:
+ filename = filename_prefix + "-" + filename
+ full_path = os.path.join(path, filename)
+ with open(full_path, "wb") as fs:
+ fs.write(self.spm.serialized_model_proto())
+ return (full_path,)
+
+
+def _is_whitespace(char):
+ """Checks whether `chars` is a whitespace character."""
+ # \t, \n, and \r are technically control characters but we treat them
+ # as whitespace since they are generally considered as such.
+ if char == " " or char == "\t" or char == "\n" or char == "\r":
+ return True
+ cat = unicodedata.category(char)
+ if cat == "Zs":
+ return True
+ return False
+
+
+def _is_control(char):
+ """Checks whether `chars` is a control character."""
+ # These are technically control characters but we count them as whitespace
+ # characters.
+ if char == "\t" or char == "\n" or char == "\r":
+ return False
+ cat = unicodedata.category(char)
+ if cat.startswith("C"):
+ return True
+ return False
+
+
+def _is_punctuation(char):
+ """Checks whether `chars` is a punctuation character."""
+ cp = ord(char)
+ # We treat all non-letter/number ASCII as punctuation.
+ # Characters such as "^", "$", and "`" are not in the Unicode
+ # Punctuation class but we treat them as punctuation anyways, for
+ # consistency.
+ if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
+ return True
+ cat = unicodedata.category(char)
+ if cat.startswith("P"):
+ return True
+ return False
+
+
+def convert_to_unicode(text):
+ """Converts `text` to Unicode (if it's not already), assuming utf-8 input."""
+ if isinstance(text, str):
+ return text
+ elif isinstance(text, bytes):
+ return text.decode("utf-8", "ignore")
+ else:
+ raise ValueError(f"Unsupported string type: {type(text)}")
diff --git a/venv/lib/python3.10/site-packages/transformers/models/deberta_v2/tokenization_deberta_v2_fast.py b/venv/lib/python3.10/site-packages/transformers/models/deberta_v2/tokenization_deberta_v2_fast.py
new file mode 100644
index 0000000000000000000000000000000000000000..cb92a61edf1afbde7c21f4be7130bb649ef3a8ab
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/deberta_v2/tokenization_deberta_v2_fast.py
@@ -0,0 +1,220 @@
+# coding=utf-8
+# Copyright 2020 Microsoft and the HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Fast Tokenization class for model DeBERTa."""
+
+import os
+from shutil import copyfile
+from typing import Optional, Tuple
+
+from ...file_utils import is_sentencepiece_available
+from ...tokenization_utils_fast import PreTrainedTokenizerFast
+from ...utils import logging
+
+
+if is_sentencepiece_available():
+ from .tokenization_deberta_v2 import DebertaV2Tokenizer
+else:
+ DebertaV2Tokenizer = None
+
+logger = logging.get_logger(__name__)
+
+VOCAB_FILES_NAMES = {"vocab_file": "spm.model", "tokenizer_file": "tokenizer.json"}
+
+
+class DebertaV2TokenizerFast(PreTrainedTokenizerFast):
+ r"""
+ Constructs a DeBERTa-v2 fast tokenizer. Based on [SentencePiece](https://github.com/google/sentencepiece).
+
+ Args:
+ vocab_file (`str`):
+ [SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that
+ contains the vocabulary necessary to instantiate a tokenizer.
+ do_lower_case (`bool`, *optional*, defaults to `False`):
+ Whether or not to lowercase the input when tokenizing.
+ bos_token (`string`, *optional*, defaults to `"[CLS]"`):
+            The beginning of sequence token that was used during pre-training. Can be used as a sequence classifier token.
+ When building a sequence using special tokens, this is not the token that is used for the beginning of
+ sequence. The token used is the `cls_token`.
+ eos_token (`string`, *optional*, defaults to `"[SEP]"`):
+ The end of sequence token. When building a sequence using special tokens, this is not the token that is
+ used for the end of sequence. The token used is the `sep_token`.
+ unk_token (`str`, *optional*, defaults to `"[UNK]"`):
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
+ token instead.
+ sep_token (`str`, *optional*, defaults to `"[SEP]"`):
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
+ sequence classification or for a text and a question for question answering. It is also used as the last
+ token of a sequence built with special tokens.
+ pad_token (`str`, *optional*, defaults to `"[PAD]"`):
+ The token used for padding, for example when batching sequences of different lengths.
+ cls_token (`str`, *optional*, defaults to `"[CLS]"`):
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
+ mask_token (`str`, *optional*, defaults to `"[MASK]"`):
+ The token used for masking values. This is the token used when training this model with masked language
+ modeling. This is the token which the model will try to predict.
+ sp_model_kwargs (`dict`, *optional*):
+ Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for
+ SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things,
+ to set:
+
+ - `enable_sampling`: Enable subword regularization.
+ - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.
+
+ - `nbest_size = {0,1}`: No sampling is performed.
+ - `nbest_size > 1`: samples from the nbest_size results.
+            - `nbest_size < 0`: assumes that `nbest_size` is infinite and samples from all hypotheses (lattice)
+              using the forward-filtering-and-backward-sampling algorithm.
+
+ - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
+ BPE-dropout.
+ """
+
+ vocab_files_names = VOCAB_FILES_NAMES
+ slow_tokenizer_class = DebertaV2Tokenizer
+
+ def __init__(
+ self,
+ vocab_file=None,
+ tokenizer_file=None,
+ do_lower_case=False,
+ split_by_punct=False,
+ bos_token="[CLS]",
+ eos_token="[SEP]",
+ unk_token="[UNK]",
+ sep_token="[SEP]",
+ pad_token="[PAD]",
+ cls_token="[CLS]",
+ mask_token="[MASK]",
+ **kwargs,
+ ) -> None:
+ super().__init__(
+ vocab_file,
+ tokenizer_file=tokenizer_file,
+ do_lower_case=do_lower_case,
+ bos_token=bos_token,
+ eos_token=eos_token,
+ unk_token=unk_token,
+ sep_token=sep_token,
+ pad_token=pad_token,
+ cls_token=cls_token,
+ mask_token=mask_token,
+ split_by_punct=split_by_punct,
+ **kwargs,
+ )
+
+ self.do_lower_case = do_lower_case
+ self.split_by_punct = split_by_punct
+ self.vocab_file = vocab_file
+
+ @property
+ def can_save_slow_tokenizer(self) -> bool:
+ return os.path.isfile(self.vocab_file) if self.vocab_file else False
+
+ def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
+ """
+        Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
+ adding special tokens. A DeBERTa sequence has the following format:
+
+ - single sequence: [CLS] X [SEP]
+ - pair of sequences: [CLS] A [SEP] B [SEP]
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs to which the special tokens will be added.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
+ """
+
+ if token_ids_1 is None:
+ return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
+ cls = [self.cls_token_id]
+ sep = [self.sep_token_id]
+ return cls + token_ids_0 + sep + token_ids_1 + sep
+
+ def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
+ """
+ Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding
+ special tokens using the tokenizer `prepare_for_model` or `encode_plus` methods.
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
+ Whether or not the token list is already formatted with special tokens for the model.
+
+ Returns:
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
+ """
+
+ if already_has_special_tokens:
+ return super().get_special_tokens_mask(
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
+ )
+
+ if token_ids_1 is not None:
+ return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
+ return [1] + ([0] * len(token_ids_0)) + [1]
+
+ def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
+ """
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. A DeBERTa
+ sequence pair mask has the following format:
+
+ ```
+ 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
+ | first sequence | second sequence |
+ ```
+
+ If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
+ """
+ sep = [self.sep_token_id]
+ cls = [self.cls_token_id]
+ if token_ids_1 is None:
+ return len(cls + token_ids_0 + sep) * [0]
+ return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
+
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
+ if not self.can_save_slow_tokenizer:
+ raise ValueError(
+ "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
+ "tokenizer."
+ )
+
+ if not os.path.isdir(save_directory):
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
+ return
+ out_vocab_file = os.path.join(
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
+ )
+
+ if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
+ copyfile(self.vocab_file, out_vocab_file)
+
+ return (out_vocab_file,)
diff --git a/venv/lib/python3.10/site-packages/transformers/models/gpt2/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/gpt2/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..96cafe55435fb392f5d4567fdf07c074177e06f6
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/gpt2/__pycache__/__init__.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/gpt2/__pycache__/configuration_gpt2.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/gpt2/__pycache__/configuration_gpt2.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b051a3dbe6f7fa8fb3af9fdb2fa41d98557c2917
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/gpt2/__pycache__/configuration_gpt2.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/gpt2/__pycache__/modeling_flax_gpt2.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/gpt2/__pycache__/modeling_flax_gpt2.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f94baa3ef49b0e83b470c5406e336f05398fbc07
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/gpt2/__pycache__/modeling_flax_gpt2.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/gpt2/__pycache__/modeling_tf_gpt2.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/gpt2/__pycache__/modeling_tf_gpt2.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..04652cc33bce6161389f35ee136de63a639cac62
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/gpt2/__pycache__/modeling_tf_gpt2.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/gpt2/__pycache__/tokenization_gpt2_fast.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/gpt2/__pycache__/tokenization_gpt2_fast.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a20da110ddf599bf46de435b331ba319dad3d375
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/gpt2/__pycache__/tokenization_gpt2_fast.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/gpt2/__pycache__/tokenization_gpt2_tf.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/gpt2/__pycache__/tokenization_gpt2_tf.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0e06970073aac2a8a2b4f70b49b0aa0ba7b3615c
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/gpt2/__pycache__/tokenization_gpt2_tf.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/llava/__init__.py b/venv/lib/python3.10/site-packages/transformers/models/llava/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..79f7b3ea3095599f363a9311bc3db4067d097c37
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/llava/__init__.py
@@ -0,0 +1,57 @@
+# Copyright 2023 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
+
+
+_import_structure = {
+ "configuration_llava": ["LLAVA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlavaConfig"],
+ "processing_llava": ["LlavaProcessor"],
+}
+
+
+try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_llava"] = [
+ "LLAVA_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "LlavaForConditionalGeneration",
+ "LlavaPreTrainedModel",
+ ]
+
+
+if TYPE_CHECKING:
+ from .configuration_llava import LLAVA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlavaConfig
+ from .processing_llava import LlavaProcessor
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_llava import (
+ LLAVA_PRETRAINED_MODEL_ARCHIVE_LIST,
+ LlavaForConditionalGeneration,
+ LlavaPreTrainedModel,
+ )
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
diff --git a/venv/lib/python3.10/site-packages/transformers/models/llava/__pycache__/convert_llava_weights_to_hf.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/llava/__pycache__/convert_llava_weights_to_hf.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6737df639c9a6cd6b76932103601c0dbc6e0c795
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/llava/__pycache__/convert_llava_weights_to_hf.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/llava/__pycache__/modeling_llava.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/llava/__pycache__/modeling_llava.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6e20359cf4c514d16a7751e4df8f4fb2cad6f482
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/llava/__pycache__/modeling_llava.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/llava/configuration_llava.py b/venv/lib/python3.10/site-packages/transformers/models/llava/configuration_llava.py
new file mode 100644
index 0000000000000000000000000000000000000000..8c322f41de7de2f196502587e94fff94f60fad6f
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/llava/configuration_llava.py
@@ -0,0 +1,156 @@
+# coding=utf-8
+# Copyright 2023 Microsoft Research & University of Wisconsin-Madison and the HuggingFace Inc. team. All rights reserved.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" Llava model configuration"""
+
+import warnings
+
+from ...configuration_utils import PretrainedConfig
+from ...utils import logging
+from ..auto import CONFIG_MAPPING
+
+
+logger = logging.get_logger(__name__)
+
+
+from ..deprecated._archive_maps import LLAVA_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
+
+
+class LlavaConfig(PretrainedConfig):
+ r"""
+ This is the configuration class to store the configuration of a [`LlavaForConditionalGeneration`]. It is used to instantiate an
+ Llava model according to the specified arguments, defining the model architecture. Instantiating a configuration
+ with the defaults will yield a similar configuration to that of the Llava-9B.
+
+ e.g. [llava-hf/llava-9b](https://huggingface.co/llava-hf/llava-9b)
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+ Args:
+ vision_config (`Union[AutoConfig, dict]`, *optional*, defaults to `CLIPVisionConfig`):
+ The config object or dictionary of the vision backbone.
+ text_config (`Union[AutoConfig, dict]`, *optional*, defaults to `LlamaConfig`):
+ The config object or dictionary of the text backbone.
+ ignore_index (`int`, *optional*, defaults to -100):
+ The ignore index for the loss function.
+ image_token_index (`int`, *optional*, defaults to 32000):
+ The image token index to encode the image prompt.
+ projector_hidden_act (`str`, *optional*, defaults to `"gelu"`):
+ The activation function used by the multimodal projector.
+ vision_feature_select_strategy (`str`, *optional*, defaults to `"default"`):
+ The feature selection strategy used to select the vision feature from the vision backbone.
+ Can be one of `"default"` or `"full"`.
+ vision_feature_layer (`int`, *optional*, defaults to -2):
+ The index of the layer to select the vision feature.
+
+ Example:
+
+ ```python
+ >>> from transformers import LlavaForConditionalGeneration, LlavaConfig, CLIPVisionConfig, LlamaConfig
+
+ >>> # Initializing a CLIP-vision config
+ >>> vision_config = CLIPVisionConfig()
+
+ >>> # Initializing a Llama config
+ >>> text_config = LlamaConfig()
+
+ >>> # Initializing a Llava llava-1.5-7b style configuration
+ >>> configuration = LlavaConfig(vision_config, text_config)
+
+ >>> # Initializing a model from the llava-1.5-7b style configuration
+ >>> model = LlavaForConditionalGeneration(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```"""
+
+ model_type = "llava"
+ is_composition = False
+
+ def __init__(
+ self,
+ vision_config=None,
+ text_config=None,
+ ignore_index=-100,
+ image_token_index=32000,
+ projector_hidden_act="gelu",
+ vision_feature_select_strategy="default",
+ vision_feature_layer=-2,
+ **kwargs,
+ ):
+ self.ignore_index = ignore_index
+ self.image_token_index = image_token_index
+ self.projector_hidden_act = projector_hidden_act
+
+ if vision_feature_select_strategy not in ["default", "full"]:
+ raise ValueError(
+                "vision_feature_select_strategy should be one of 'default', 'full'. "
+                f"Got: {vision_feature_select_strategy}"
+ )
+
+ if "vocab_size" in kwargs:
+ warnings.warn(
+ "The `vocab_size` argument is deprecated and will be removed in v4.42, since it can be inferred from the `text_config`. Passing this argument has no effect",
+ FutureWarning,
+ )
+
+ self.vision_feature_select_strategy = vision_feature_select_strategy
+ self.vision_feature_layer = vision_feature_layer
+
+ if isinstance(vision_config, dict):
+ vision_config["model_type"] = (
+ vision_config["model_type"] if "model_type" in vision_config else "clip_vision_model"
+ )
+ vision_config = CONFIG_MAPPING[vision_config["model_type"]](**vision_config)
+ elif vision_config is None:
+ vision_config = CONFIG_MAPPING["clip_vision_model"](
+ intermediate_size=4096,
+ hidden_size=1024,
+ patch_size=14,
+ image_size=336,
+ num_hidden_layers=24,
+ num_attention_heads=16,
+ vocab_size=32000,
+ projection_dim=768,
+ )
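+            # NOTE: these defaults mirror openai/clip-vit-large-patch14-336, the vision tower
+            # used by the LLaVA-1.5 releases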
+
+ self.vision_config = vision_config
+
+ if isinstance(text_config, dict):
+ text_config["model_type"] = text_config["model_type"] if "model_type" in text_config else "llama"
+ text_config = CONFIG_MAPPING[text_config["model_type"]](**text_config)
+ elif text_config is None:
+ text_config = CONFIG_MAPPING["llama"]()
+
+ self.text_config = text_config
+ self._vocab_size = self.text_config.vocab_size
+
+ super().__init__(**kwargs)
+
+ @property
+ def vocab_size(self):
+ warnings.warn(
+ "The `vocab_size` attribute is deprecated and will be removed in v4.42, Please use `text_config.vocab_size` instead.",
+ FutureWarning,
+ )
+ return self._vocab_size
+
+ @vocab_size.setter
+ def vocab_size(self, value):
+ self._vocab_size = value
+
+ def to_dict(self):
+ output = super().to_dict()
+ output.pop("_vocab_size", None)
+ return output
diff --git a/venv/lib/python3.10/site-packages/transformers/models/llava/convert_llava_weights_to_hf.py b/venv/lib/python3.10/site-packages/transformers/models/llava/convert_llava_weights_to_hf.py
new file mode 100644
index 0000000000000000000000000000000000000000..bb40668f32c7d00506f52f287f31723667d3793a
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/llava/convert_llava_weights_to_hf.py
@@ -0,0 +1,148 @@
+# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import argparse
+
+import torch
+from huggingface_hub import hf_hub_download
+
+from transformers import (
+ AddedToken,
+ AutoConfig,
+ AutoTokenizer,
+ CLIPImageProcessor,
+ LlavaConfig,
+ LlavaForConditionalGeneration,
+ LlavaProcessor,
+)
+
+
+EPILOG_TXT = """Example:
+ python transformers/src/transformers/models/llava/convert_llava_weights_to_hf.py --text_model_id lmsys/vicuna-7b-v1.5 --vision_model_id openai/clip-vit-large-patch14-336 --output_hub_path org/llava-v1.5-7b-conv --old_state_dict_id liuhaotian/llava-v1.5-7b
+
+Example for creating the old state dict file with Python:
+
+ import torch
+ from llava.model.language_model.llava_llama import LlavaLlamaForCausalLM
+
+ # load model
+ kwargs = {"device_map": "auto", "torch_dtype": torch.float16}
+ model = LlavaLlamaForCausalLM.from_pretrained("liuhaotian/llava-v1.5-7b", low_cpu_mem_usage=True, **kwargs)
+
+ # load vision tower
+ model.get_vision_tower().load_model()
+
+ # Save state dict
+ torch.save(model.state_dict(), "tmp/hf_models/llava-v1.5-7b/model_state_dict.bin")
+"""
+
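+# Substring replacements applied in dict order to every key of the original checkpoint. Order matters:
+# e.g. a key containing "model" is first rewritten to "model.model" and then remapped to
+# "language_model.model" by the later entry.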
+KEYS_TO_MODIFY_MAPPING = {
+ "model.vision_tower.": "",
+ "model.mm_projector": "multi_modal_projector",
+ "model": "model.model",
+ "vision_model.model": "vision_model",
+ "lm_head": "language_model.lm_head",
+ "model.model": "language_model.model",
+ "multi_modal_projector.0": "multi_modal_projector.linear_1",
+ "multi_modal_projector.2": "multi_modal_projector.linear_2",
+}
+
+
+def convert_state_dict_to_hf(state_dict):
+ new_state_dict = {}
+ for key, value in state_dict.items():
+ if key.endswith(".inv_freq"):
+ continue
+ for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
+ if key_to_modify in key:
+ key = key.replace(key_to_modify, new_key)
+
+ new_state_dict[key] = value
+ return new_state_dict
+
+
+def convert_llava_llama_to_hf(text_model_id, vision_model_id, output_hub_path, old_state_dict_id):
+ torch.set_default_dtype(torch.float16)
+ text_config = AutoConfig.from_pretrained(text_model_id)
+
+ tokenizer = AutoTokenizer.from_pretrained(text_model_id)
+ tokenizer.add_tokens(AddedToken("<image>", special=True, normalized=False), special_tokens=True)
+ tokenizer.add_special_tokens({"pad_token": "<pad>"})
+
+ image_processor = CLIPImageProcessor.from_pretrained(vision_model_id)
+
+ processor = LlavaProcessor(tokenizer=tokenizer, image_processor=image_processor)
+
+ config = LlavaConfig(text_config=text_config)
+ config.pad_token_id = 32001
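+ # Two tokens are appended to the base 32000-token vocabulary: "<image>" at
+ # index 32000 and "<pad>" at index 32001 (hence the pad_token_id above).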
+
+ with torch.device("meta"):
+ model = LlavaForConditionalGeneration(config)
+
+ # Pad to 64 for performance reasons
+ pad_shape = 64
+
+ state_dict_path = hf_hub_download(old_state_dict_id, "model_state_dict.bin")
+
+ state_dict = torch.load(state_dict_path, map_location="cpu")
+ state_dict = convert_state_dict_to_hf(state_dict)
+ model.load_state_dict(state_dict, strict=True, assign=True)
+
+ pre_expansion_embeddings = model.language_model.model.embed_tokens.weight.data
+ mu = torch.mean(pre_expansion_embeddings, dim=0).float()
+ n = pre_expansion_embeddings.size()[0]
+ sigma = ((pre_expansion_embeddings - mu).T @ (pre_expansion_embeddings - mu)) / n
+ dist = torch.distributions.multivariate_normal.MultivariateNormal(mu, covariance_matrix=1e-5 * sigma)
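+ # New token rows are initialized by sampling from a multivariate normal fitted to
+ # the existing embeddings (mean mu, scaled covariance sigma), so the added tokens
+ # start close to the distribution of the pre-expansion vocabulary.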
+
+ # We add an image token so we resize the model
+ model.resize_token_embeddings(config.text_config.vocab_size + 2, pad_shape)
+ model.language_model.model.embed_tokens.weight.data[32000:] = torch.stack(
+ tuple((dist.sample() for _ in range(model.language_model.model.embed_tokens.weight.data[32000:].shape[0]))),
+ dim=0,
+ )
+ model.language_model.lm_head.weight.data[32000:] = torch.stack(
+ tuple((dist.sample() for _ in range(model.language_model.lm_head.weight.data[32000:].shape[0]))),
+ dim=0,
+ )
+
+ model.push_to_hub(output_hub_path)
+ processor.push_to_hub(output_hub_path)
+
+
+def main():
+ parser = argparse.ArgumentParser(
+ epilog=EPILOG_TXT,
+ formatter_class=argparse.RawDescriptionHelpFormatter,
+ )
+ parser.add_argument(
+ "--text_model_id",
+ help="Hub location of the text model",
+ )
+ parser.add_argument(
+ "--vision_model_id",
+ help="Hub location of the vision model",
+ )
+ parser.add_argument(
+ "--output_hub_path",
+ help="Location on the hub of the converted model",
+ )
+ parser.add_argument(
+ "--old_state_dict_id",
+ help="Location on the hub of the raw state dict of the original model. The filename needs to be `model_state_dict.bin`",
+ )
+ args = parser.parse_args()
+ convert_llava_llama_to_hf(args.text_model_id, args.vision_model_id, args.output_hub_path, args.old_state_dict_id)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/venv/lib/python3.10/site-packages/transformers/models/llava/modeling_llava.py b/venv/lib/python3.10/site-packages/transformers/models/llava/modeling_llava.py
new file mode 100644
index 0000000000000000000000000000000000000000..4cf5d98f77f11480e64d6364627e61c29cbe2ce6
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/llava/modeling_llava.py
@@ -0,0 +1,572 @@
+# coding=utf-8
+# Copyright 2023 the HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""PyTorch Llava model."""
+
+from dataclasses import dataclass
+from typing import List, Optional, Tuple, Union
+
+import torch
+import torch.utils.checkpoint
+from torch import nn
+
+from ... import PreTrainedModel
+from ...activations import ACT2FN
+from ...cache_utils import Cache
+from ...modeling_outputs import ModelOutput
+from ...utils import (
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ logging,
+ replace_return_docstrings,
+)
+from ..auto import AutoModel, AutoModelForCausalLM
+from .configuration_llava import LlavaConfig
+
+
+logger = logging.get_logger(__name__)
+
+_CONFIG_FOR_DOC = "LlavaConfig"
+
+
+from ..deprecated._archive_maps import LLAVA_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
+
+
+@dataclass
+# Copied from transformers.models.idefics.modeling_idefics.IdeficsCausalLMOutputWithPast with Idefics->Llava
+class LlavaCausalLMOutputWithPast(ModelOutput):
+ """
+ Base class for Llava causal language model (or autoregressive) outputs.
+
+ Args:
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Language modeling loss (for next-token prediction).
+ logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
+ Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)`)
+
+ Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
+ `past_key_values` input) to speed up sequential decoding.
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ image_hidden_states (`tuple(torch.FloatTensor)`, *optional*):
+ Tuple of `torch.FloatTensor` (one for the output of the image embeddings) of shape `(batch_size, num_images,
+ sequence_length, hidden_size)`.
+
+ image_hidden_states of the model produced by the vision encoder, and optionally by the perceiver.
+ """
+
+ loss: Optional[torch.FloatTensor] = None
+ logits: torch.FloatTensor = None
+ past_key_values: Optional[List[torch.FloatTensor]] = None
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
+ image_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+
+
+class LlavaMultiModalProjector(nn.Module):
+ def __init__(self, config: LlavaConfig):
+ super().__init__()
+
+ self.linear_1 = nn.Linear(config.vision_config.hidden_size, config.text_config.hidden_size, bias=True)
+ self.act = ACT2FN[config.projector_hidden_act]
+ self.linear_2 = nn.Linear(config.text_config.hidden_size, config.text_config.hidden_size, bias=True)
+
+ def forward(self, image_features):
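+ # image_features: (num_images, num_patches, vision_hidden_size)
+ # -> (num_images, num_patches, text_hidden_size) after the two-layer MLP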
+ hidden_states = self.linear_1(image_features)
+ hidden_states = self.act(hidden_states)
+ hidden_states = self.linear_2(hidden_states)
+ return hidden_states
+
+
+LLAVA_START_DOCSTRING = r"""
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
+ library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
+ etc.)
+
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
+ and behavior.
+
+ Parameters:
+ config ([`LlavaConfig`] or [`LlavaVisionConfig`]):
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
+ load the weights associated with the model, only the configuration. Check out the
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+
+@add_start_docstrings(
+ "The bare LLaMA Model outputting raw hidden-states without any specific head on top.",
+ LLAVA_START_DOCSTRING,
+)
+class LlavaPreTrainedModel(PreTrainedModel):
+ config_class = LlavaConfig
+ base_model_prefix = "model"
+ supports_gradient_checkpointing = True
+ _no_split_modules = ["LlavaVisionAttention"]
+ _skip_keys_device_placement = "past_key_values"
+ _supports_flash_attn_2 = True
+
+ def _init_weights(self, module):
+ # important: this ported version of Llava isn't meant for training from scratch - only
+ # inference and fine-tuning - so the proper init weights code has been removed - the original codebase
+ # https://github.com/haotian-liu/LLaVA/tree/main/llava should serve for that purpose
+ std = (
+ self.config.initializer_range
+ if hasattr(self.config, "initializer_range")
+ else self.config.text_config.initializer_range
+ )
+
+ if hasattr(module, "class_embedding"):
+ module.class_embedding.data.normal_(mean=0.0, std=std)
+
+ if isinstance(module, (nn.Linear, nn.Conv2d)):
+ module.weight.data.normal_(mean=0.0, std=std)
+ if module.bias is not None:
+ module.bias.data.zero_()
+ elif isinstance(module, nn.Embedding):
+ module.weight.data.normal_(mean=0.0, std=std)
+ if module.padding_idx is not None:
+ module.weight.data[module.padding_idx].zero_()
+
+ @property
+ def _supports_sdpa(self):
+ """
+ Retrieve language_model's attribute to check whether the model supports
+ SDPA or not.
+ """
+ return self.language_model._supports_sdpa
+
+
+LLAVA_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
+ it.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):
+ The tensors corresponding to the input images. Pixel values can be obtained using
+ [`AutoImageProcessor`]. See [`CLIPImageProcessor.__call__`] for details ([`LlavaProcessor`] uses
+ [`CLIPImageProcessor`] for processing images).
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
+ `past_key_values`).
+
+ If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
+ and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
+ information on the default strategy.
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
+ config.n_positions - 1]`. [What are position IDs?](../glossary#position-ids)
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape
+ `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
+
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
+ blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
+
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
+ model's internal embedding lookup matrix.
+ vision_feature_layer (`int`, *optional*, defaults to -2):
+ The index of the layer to select the vision feature.
+ vision_feature_select_strategy (`str`, *optional*, defaults to `"default"`):
+ The feature selection strategy used to select the vision feature from the vision backbone.
+ Can be one of `"default"` or `"full"`.
+ use_cache (`bool`, *optional*):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
+ `past_key_values`).
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+@add_start_docstrings(
+ """The LLAVA model which consists of a vision backbone and a language model.""",
+ LLAVA_START_DOCSTRING,
+)
+class LlavaForConditionalGeneration(LlavaPreTrainedModel):
+ def __init__(self, config: LlavaConfig):
+ super().__init__(config)
+ self.vision_tower = AutoModel.from_config(config.vision_config)
+
+ self.multi_modal_projector = LlavaMultiModalProjector(config)
+ self.vocab_size = config.text_config.vocab_size
+ self.language_model = AutoModelForCausalLM.from_config(
+ config.text_config, attn_implementation=config._attn_implementation
+ )
+ self.pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else -1
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.language_model.get_input_embeddings()
+
+ def set_input_embeddings(self, value):
+ self.language_model.set_input_embeddings(value)
+
+ def get_output_embeddings(self):
+ return self.language_model.get_output_embeddings()
+
+ def set_output_embeddings(self, new_embeddings):
+ self.language_model.set_output_embeddings(new_embeddings)
+
+ def set_decoder(self, decoder):
+ self.language_model.set_decoder(decoder)
+
+ def get_decoder(self):
+ return self.language_model.get_decoder()
+
+ def tie_weights(self):
+ return self.language_model.tie_weights()
+
+ def resize_token_embeddings(self, new_num_tokens: Optional[int] = None, pad_to_multiple_of=None) -> nn.Embedding:
+ model_embeds = self.language_model.resize_token_embeddings(new_num_tokens, pad_to_multiple_of)
+ # update vocab size
+ self.config.text_config.vocab_size = model_embeds.num_embeddings
+ self.vocab_size = model_embeds.num_embeddings
+ return model_embeds
+
+ def _merge_input_ids_with_image_features(self, image_features, inputs_embeds, input_ids, attention_mask, labels):
+ num_images, num_image_patches, embed_dim = image_features.shape
+ batch_size, sequence_length = input_ids.shape
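+ # Padding side is inferred from the data: if no sequence ends with the pad token,
+ # the batch is assumed to be left-padded.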
+ left_padding = not torch.sum(input_ids[:, -1] == torch.tensor(self.pad_token_id))
+ # 1. Create a mask to know where special image tokens are
+ special_image_token_mask = input_ids == self.config.image_token_index
+ num_special_image_tokens = torch.sum(special_image_token_mask, dim=-1)
+ # Compute the maximum embed dimension
+ max_embed_dim = (num_special_image_tokens.max() * (num_image_patches - 1)) + sequence_length
+ batch_indices, non_image_indices = torch.where(input_ids != self.config.image_token_index)
+
+ # 2. Compute the positions where text should be written
+ # Calculate new positions for text tokens in merged image-text sequence.
+ # `special_image_token_mask` identifies image tokens. Each image token will be replaced by `nb_text_tokens_per_images - 1` text tokens.
+ # `torch.cumsum` computes how each image token shifts subsequent text token positions.
+ # - 1 to adjust for zero-based indexing, as `cumsum` inherently increases indices by one.
+ new_token_positions = torch.cumsum((special_image_token_mask * (num_image_patches - 1) + 1), -1) - 1
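+ # Worked example (illustrative): with 3 patches per image and tokens [text, image, text],
+ # mask = [0, 1, 0] gives mask * 2 + 1 = [1, 3, 1] and cumsum - 1 = [0, 3, 4]; the two
+ # text tokens land at positions 0 and 4, leaving positions 1..3 for the image patches.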
+ nb_image_pad = max_embed_dim - 1 - new_token_positions[:, -1]
+ if left_padding:
+ new_token_positions += nb_image_pad[:, None] # offset for left padding
+ text_to_overwrite = new_token_positions[batch_indices, non_image_indices]
+
+ # 3. Create the full embedding, already padded to the maximum position
+ final_embedding = torch.zeros(
+ batch_size, max_embed_dim, embed_dim, dtype=inputs_embeds.dtype, device=inputs_embeds.device
+ )
+ final_attention_mask = torch.zeros(
+ batch_size, max_embed_dim, dtype=attention_mask.dtype, device=inputs_embeds.device
+ )
+ if labels is not None:
+ final_labels = torch.full(
+ (batch_size, max_embed_dim), self.config.ignore_index, dtype=input_ids.dtype, device=input_ids.device
+ )
+ # In case the Vision model or the Language model has been offloaded to CPU, we need to manually
+ # set the corresponding tensors into their correct target device.
+ target_device = inputs_embeds.device
+ batch_indices, non_image_indices, text_to_overwrite = (
+ batch_indices.to(target_device),
+ non_image_indices.to(target_device),
+ text_to_overwrite.to(target_device),
+ )
+ attention_mask = attention_mask.to(target_device)
+
+ # 4. Fill the embeddings based on the mask. If we have ["hey", "<image>", "how", "are"]
+ # we need to index copy on [0, 577, 578, 579] for the text and [1:576] for the image features
+ final_embedding[batch_indices, text_to_overwrite] = inputs_embeds[batch_indices, non_image_indices]
+ final_attention_mask[batch_indices, text_to_overwrite] = attention_mask[batch_indices, non_image_indices]
+ if labels is not None:
+ final_labels[batch_indices, text_to_overwrite] = labels[batch_indices, non_image_indices]
+
+ # 5. Fill the embeddings corresponding to the images. Anything that is still zeros needs filling
+ image_to_overwrite = torch.all(final_embedding == 0, dim=-1)
+ image_to_overwrite &= image_to_overwrite.cumsum(-1) - 1 >= nb_image_pad[:, None].to(target_device)
+
+ if image_to_overwrite.sum() != image_features.shape[:-1].numel():
+ raise ValueError(
+ f"The input provided to the model are wrong. The number of image tokens is {torch.sum(special_image_token_mask)} while"
+ f" the number of image given to the model is {num_images}. This prevents correct indexing and breaks batch generation."
+ )
+
+ final_embedding[image_to_overwrite] = image_features.contiguous().reshape(-1, embed_dim).to(target_device)
+ final_attention_mask |= image_to_overwrite
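+ # Recompute position ids over the merged mask; padding slots get a dummy position of 1.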
+ position_ids = (final_attention_mask.cumsum(-1) - 1).masked_fill_((final_attention_mask == 0), 1)
+
+ # 6. Mask out the embedding at padding positions, as we later use the past_key_value value to determine the non-attended tokens.
+ batch_indices, pad_indices = torch.where(input_ids == self.pad_token_id)
+ indices_to_mask = new_token_positions[batch_indices, pad_indices]
+
+ final_embedding[batch_indices, indices_to_mask] = 0
+
+ if labels is None:
+ final_labels = None
+
+ return final_embedding, final_attention_mask, final_labels, position_ids
+
+ @add_start_docstrings_to_model_forward(LLAVA_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=LlavaCausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_ids: torch.LongTensor = None,
+ pixel_values: torch.FloatTensor = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ vision_feature_layer: Optional[int] = None,
+ vision_feature_select_strategy: Optional[str] = None,
+ labels: Optional[torch.LongTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, LlavaCausalLMOutputWithPast]:
+ r"""
+ Args:
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
+
+ Returns:
+
+ Example:
+
+ ```python
+ >>> from PIL import Image
+ >>> import requests
+ >>> from transformers import AutoProcessor, LlavaForConditionalGeneration
+
+ >>> model = LlavaForConditionalGeneration.from_pretrained("llava-hf/llava-1.5-7b-hf")
+ >>> processor = AutoProcessor.from_pretrained("llava-hf/llava-1.5-7b-hf")
+
+ >>> prompt = "USER: <image>\nWhat's the content of the image? ASSISTANT:"
+ >>> url = "https://www.ilankelman.org/stopsigns/australia.jpg"
+ >>> image = Image.open(requests.get(url, stream=True).raw)
+
+ >>> inputs = processor(text=prompt, images=image, return_tensors="pt")
+
+ >>> # Generate
+ >>> generate_ids = model.generate(**inputs, max_new_tokens=15)
+ >>> processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
+ "USER: \nWhat's the content of the image? ASSISTANT: The image features a busy city street with a stop sign prominently displayed"
+ ```"""
+
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+ vision_feature_layer = (
+ vision_feature_layer if vision_feature_layer is not None else self.config.vision_feature_layer
+ )
+ vision_feature_select_strategy = (
+ vision_feature_select_strategy
+ if vision_feature_select_strategy is not None
+ else self.config.vision_feature_select_strategy
+ )
+
+ if inputs_embeds is None:
+ # 1. Extract the input embeddings
+ inputs_embeds = self.get_input_embeddings()(input_ids)
+
+ # 2. Merge text and images
+ if pixel_values is not None and input_ids.shape[1] != 1:
+ image_outputs = self.vision_tower(pixel_values, output_hidden_states=True)
+ # this is not memory efficient at all: (output_hidden_states=True) will save all the hidden states.
+ selected_image_feature = image_outputs.hidden_states[vision_feature_layer]
+
+ if vision_feature_select_strategy == "default":
+ selected_image_feature = selected_image_feature[:, 1:]
+ elif vision_feature_select_strategy == "full":
+ selected_image_feature = selected_image_feature
+ else:
+ raise ValueError(
+ f"Unexpected select feature strategy: {self.config.vision_feature_select_strategy}"
+ )
+
+ image_features = self.multi_modal_projector(selected_image_feature)
+ inputs_embeds, attention_mask, labels, position_ids = self._merge_input_ids_with_image_features(
+ image_features, inputs_embeds, input_ids, attention_mask, labels
+ )
+ if labels is None:
+ labels = torch.full_like(attention_mask, self.config.ignore_index).to(torch.long)
+
+ # In case input_ids.shape[1] == 1 & pixel_values != None & past_key_values != None, we are in the case of
+ # generation with cache
+ elif past_key_values is not None and pixel_values is not None and input_ids.shape[1] == 1:
+ # Retrieve the first layer to inspect the logits and mask out the hidden states
+ # that are set to 0
+ first_layer_past_key_value = past_key_values[0][0][:, :, :, 0]
+
+ # Sum all dimensions of head_dim (-2) to avoid random errors such as: https://github.com/huggingface/transformers/pull/28032#issuecomment-1863691941
+ batch_index, non_attended_tokens = torch.where(first_layer_past_key_value.float().sum(-2) == 0)
+
+ # Get the target length
+ target_length = input_ids.shape[1]
+ past_length = first_layer_past_key_value.shape[-1]
+
+ extended_attention_mask = torch.ones(
+ (attention_mask.shape[0], past_length),
+ dtype=attention_mask.dtype,
+ device=attention_mask.device,
+ )
+
+ # Filter out only the tokens that can be un-attended, this can happen
+ # if one uses Llava + Fused modules where the cache on the
+ # first iteration is already big enough, or if one passes custom cache
+ valid_indices = non_attended_tokens < extended_attention_mask.size(-1)
+ new_batch_index = batch_index[valid_indices]
+ new_non_attended_tokens = non_attended_tokens[valid_indices]
+
+ # Zero-out the places where we don't need to attend
+ extended_attention_mask[new_batch_index, new_non_attended_tokens] = 0
+
+ attention_mask = torch.cat((extended_attention_mask, attention_mask[:, -target_length:]), dim=1)
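+ # With a single new token, its position is the number of attended tokens minus one.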
+ position_ids = torch.sum(attention_mask, dim=1).unsqueeze(-1) - 1
+
+ outputs = self.language_model(
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ past_key_values=past_key_values,
+ inputs_embeds=inputs_embeds,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ logits = outputs[0]
+
+ loss = None
+ if labels is not None:
+ # Shift so that tokens < n predict n
+ if attention_mask is not None:
+ shift_attention_mask = attention_mask[..., 1:]
+ shift_logits = logits[..., :-1, :][shift_attention_mask.to(logits.device) != 0].contiguous()
+ shift_labels = labels[..., 1:][shift_attention_mask.to(labels.device) != 0].contiguous()
+ else:
+ shift_logits = logits[..., :-1, :].contiguous()
+ shift_labels = labels[..., 1:].contiguous()
+ # Flatten the tokens
+ loss_fct = nn.CrossEntropyLoss()
+ loss = loss_fct(
+ shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1).to(shift_logits.device)
+ )
+
+ if not return_dict:
+ output = (logits,) + outputs[1:]
+ return (loss,) + output if loss is not None else output
+
+ return LlavaCausalLMOutputWithPast(
+ loss=loss,
+ logits=logits,
+ past_key_values=outputs.past_key_values,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+ def prepare_inputs_for_generation(
+ self, input_ids, past_key_values=None, inputs_embeds=None, pixel_values=None, attention_mask=None, **kwargs
+ ):
+ if past_key_values is not None:
+ if isinstance(past_key_values, Cache):
+ cache_length = past_key_values.get_seq_length()
+ past_length = past_key_values.seen_tokens
+ else:
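+ # Legacy (tuple) cache: the sequence length is stored on the key tensor's third axis.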
+ cache_length = past_length = past_key_values[0][0].shape[2]
+
+ # Keep only the unprocessed tokens:
+ # 1 - If the length of the attention_mask exceeds the length of input_ids, then we are in a setting where
+ # some of the inputs are exclusively passed as part of the cache (e.g. when passing input_embeds as
+ # input)
+ if attention_mask is not None and attention_mask.shape[1] > input_ids.shape[1]:
+ input_ids = input_ids[:, -(attention_mask.shape[1] - past_length) :]
+ # 2 - If the past_length is smaller than input_ids', then input_ids holds all input tokens. We can discard
+ # input_ids based on the past_length.
+ elif past_length < input_ids.shape[1]:
+ input_ids = input_ids[:, past_length:]
+ # 3 - Otherwise (past_length >= input_ids.shape[1]), let's assume input_ids only has unprocessed tokens.
+ elif self.config.image_token_index in input_ids:
+ input_ids = input_ids[:, input_ids.shape[1] - 1 :]
+ # If the cache has seen more tokens than it can hold, then the cache has a size limit. Let's discard the
+ # older attention values, as their corresponding values are not part of the input.
+ if cache_length < past_length and attention_mask is not None:
+ attention_mask = attention_mask[:, -(cache_length + input_ids.shape[1]) :]
+
+ position_ids = kwargs.get("position_ids", None)
+ if attention_mask is not None and position_ids is None:
+ # create position_ids on the fly for batch generation
+ position_ids = attention_mask.long().cumsum(-1) - 1
+ position_ids.masked_fill_(attention_mask == 0, 1)
+ if past_key_values:
+ position_ids = position_ids[:, -input_ids.shape[1] :]
+
+ # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
+ if inputs_embeds is not None and past_key_values is None:
+ model_inputs = {"inputs_embeds": inputs_embeds}
+ else:
+ model_inputs = {"input_ids": input_ids}
+
+ model_inputs.update(
+ {
+ "position_ids": position_ids,
+ "past_key_values": past_key_values,
+ "use_cache": kwargs.get("use_cache"),
+ "attention_mask": attention_mask,
+ "pixel_values": pixel_values,
+ }
+ )
+ return model_inputs
+
+ def _reorder_cache(self, *args, **kwargs):
+ return self.language_model._reorder_cache(*args, **kwargs)
diff --git a/venv/lib/python3.10/site-packages/transformers/models/llava/processing_llava.py b/venv/lib/python3.10/site-packages/transformers/models/llava/processing_llava.py
new file mode 100644
index 0000000000000000000000000000000000000000..62a46acd3991b9bd22c4f0a28d022f130b6cdf03
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/llava/processing_llava.py
@@ -0,0 +1,135 @@
+# coding=utf-8
+# Copyright 2023 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Processor class for Llava.
+"""
+
+
+from typing import List, Optional, Union
+
+from ...feature_extraction_utils import BatchFeature
+from ...image_utils import ImageInput
+from ...processing_utils import ProcessorMixin
+from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
+from ...utils import TensorType
+
+
+class LlavaProcessor(ProcessorMixin):
+ r"""
+ Constructs a Llava processor which wraps a Llava image processor and a Llava tokenizer into a single processor.
+
+ [`LlavaProcessor`] offers all the functionalities of [`CLIPImageProcessor`] and [`LlamaTokenizerFast`]. See the
+ [`~LlavaProcessor.__call__`] and [`~LlavaProcessor.decode`] for more information.
+
+ Args:
+ image_processor ([`CLIPImageProcessor`], *optional*):
+ The image processor is a required input.
+ tokenizer ([`LlamaTokenizerFast`], *optional*):
+ The tokenizer is a required input.
+ """
+
+ attributes = ["image_processor", "tokenizer"]
+ image_processor_class = "CLIPImageProcessor"
+ tokenizer_class = ("LlamaTokenizer", "LlamaTokenizerFast")
+
+ def __init__(self, image_processor=None, tokenizer=None):
+ super().__init__(image_processor, tokenizer)
+
+ def __call__(
+ self,
+ text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
+ images: ImageInput = None,
+ padding: Union[bool, str, PaddingStrategy] = False,
+ truncation: Union[bool, str, TruncationStrategy] = None,
+ max_length=None,
+ return_tensors: Optional[Union[str, TensorType]] = TensorType.PYTORCH,
+ ) -> BatchFeature:
+ """
+ Main method to prepare for the model one or several sequence(s) and image(s). This method forwards the `text`
+ and `kwargs` arguments to LlamaTokenizerFast's [`~LlamaTokenizerFast.__call__`] if `text` is not `None` to encode
+ the text. To prepare the image(s), this method forwards the `images` and `kwargs` arguments to
+ CLIPImageProcessor's [`~CLIPImageProcessor.__call__`] if `images` is not `None`. Please refer to the docstring
+ of the above two methods for more information.
+
+ Args:
+ text (`str`, `List[str]`, `List[List[str]]`):
+ The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
+ (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
+ `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
+ images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`, `List[torch.Tensor]`):
+ The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
+ tensor. Both channels-first and channels-last formats are supported.
+ padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
+ Select a strategy to pad the returned sequences (according to the model's padding side and padding
+ index) among:
+ - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
+ sequence is provided).
+ - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
+ acceptable input length for the model if that argument is not provided.
+ - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
+ lengths).
+ max_length (`int`, *optional*):
+ Maximum length of the returned list and optionally padding length (see above).
+ truncation (`bool`, *optional*):
+ Activates truncation to cut input sequences longer than `max_length` to `max_length`.
+ return_tensors (`str` or [`~utils.TensorType`], *optional*):
+ If set, will return tensors of a particular framework. Acceptable values are:
+
+ - `'tf'`: Return TensorFlow `tf.constant` objects.
+ - `'pt'`: Return PyTorch `torch.Tensor` objects.
+ - `'np'`: Return NumPy `np.ndarray` objects.
+ - `'jax'`: Return JAX `jnp.ndarray` objects.
+
+ Returns:
+ [`BatchFeature`]: A [`BatchFeature`] with the following fields:
+
+ - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
+ - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
+ `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
+ `None`).
+ - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.
+ """
+ if images is not None:
+ pixel_values = self.image_processor(images, return_tensors=return_tensors)["pixel_values"]
+ else:
+ pixel_values = None
+ text_inputs = self.tokenizer(
+ text, return_tensors=return_tensors, padding=padding, truncation=truncation, max_length=max_length
+ )
+
+ return BatchFeature(data={**text_inputs, "pixel_values": pixel_values})
+
+ # Copied from transformers.models.clip.processing_clip.CLIPProcessor.batch_decode with CLIP->Llama
+ def batch_decode(self, *args, **kwargs):
+ """
+ This method forwards all its arguments to LlamaTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please
+ refer to the docstring of this method for more information.
+ """
+ return self.tokenizer.batch_decode(*args, **kwargs)
+
+ # Copied from transformers.models.clip.processing_clip.CLIPProcessor.decode with CLIP->Llama
+ def decode(self, *args, **kwargs):
+ """
+ This method forwards all its arguments to LlamaTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to
+ the docstring of this method for more information.
+ """
+ return self.tokenizer.decode(*args, **kwargs)
+
+ @property
+ # Copied from transformers.models.clip.processing_clip.CLIPProcessor.model_input_names
+ def model_input_names(self):
+ tokenizer_input_names = self.tokenizer.model_input_names
+ image_processor_input_names = self.image_processor.model_input_names
+ return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))