diff --git a/.gitattributes b/.gitattributes
index 69a5e53c9f777ed34674f88c6a7ba8447f53d44b..66ae3ebd0a285b6c4f31ce7e0474818b3366c318 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -90,3 +90,4 @@ venv/lib/python3.10/site-packages/nvidia/cufft/lib/libcufftw.so.11 filter=lfs di
venv/lib/python3.10/site-packages/nvidia/cuda_cupti/lib/libnvperf_host.so filter=lfs diff=lfs merge=lfs -text
venv/lib/python3.10/site-packages/nvidia/nvjitlink/lib/libnvJitLink.so.12 filter=lfs diff=lfs merge=lfs -text
venv/lib/python3.10/site-packages/nvidia/cudnn/lib/libcudnn_ops_train.so.8 filter=lfs diff=lfs merge=lfs -text
+venv/lib/python3.10/site-packages/nvidia/cublas/lib/libcublas.so.12 filter=lfs diff=lfs merge=lfs -text
diff --git a/ckpts/universal/global_step20/zero/18.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt b/ckpts/universal/global_step20/zero/18.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt
new file mode 100644
index 0000000000000000000000000000000000000000..f94e1724f7ea2f539fc51c1cfb0e76a4cdc06d1b
--- /dev/null
+++ b/ckpts/universal/global_step20/zero/18.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0583b3e1b5715c7324a5a96846f984529327ce654fe6e4ff751c55d08a1f4120
+size 33555612
diff --git a/ckpts/universal/global_step20/zero/18.mlp.dense_h_to_4h_swiglu.weight/fp32.pt b/ckpts/universal/global_step20/zero/18.mlp.dense_h_to_4h_swiglu.weight/fp32.pt
new file mode 100644
index 0000000000000000000000000000000000000000..ea92c538b5c4eb6ff4523db81c2579327632f9dd
--- /dev/null
+++ b/ckpts/universal/global_step20/zero/18.mlp.dense_h_to_4h_swiglu.weight/fp32.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5a574574e1cf4f612f36c5a950de7dbd5a2fbe95d3ee30fdceb0e508c94e83eb
+size 33555533
diff --git a/ckpts/universal/global_step20/zero/8.mlp.dense_h_to_4h_swiglu.weight/fp32.pt b/ckpts/universal/global_step20/zero/8.mlp.dense_h_to_4h_swiglu.weight/fp32.pt
new file mode 100644
index 0000000000000000000000000000000000000000..6c1277ea877b9b4264e6510d717a2c0367048168
--- /dev/null
+++ b/ckpts/universal/global_step20/zero/8.mlp.dense_h_to_4h_swiglu.weight/fp32.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:391ae28bf7845a2699f795228e5c7b365b28a4d85132194dcf94a12eb4f44725
+size 33555533
diff --git a/ckpts/universal/global_step20/zero/9.attention.query_key_value.weight/exp_avg_sq.pt b/ckpts/universal/global_step20/zero/9.attention.query_key_value.weight/exp_avg_sq.pt
new file mode 100644
index 0000000000000000000000000000000000000000..ba286a5d2f6aa44b2c721ff64984aa42f3221499
--- /dev/null
+++ b/ckpts/universal/global_step20/zero/9.attention.query_key_value.weight/exp_avg_sq.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:299c8182eea4098a354774bfd621db676c0baeae084f0059983af42bbf72f1f7
+size 50332843
diff --git a/lm-evaluation-harness/wandb/run-20240514_114444-iactyeb3/files/wandb-metadata.json b/lm-evaluation-harness/wandb/run-20240514_114444-iactyeb3/files/wandb-metadata.json
new file mode 100644
index 0000000000000000000000000000000000000000..7bd260748b976aba8ace56409d87ea4921d1ac1c
--- /dev/null
+++ b/lm-evaluation-harness/wandb/run-20240514_114444-iactyeb3/files/wandb-metadata.json
@@ -0,0 +1,810 @@
+{
+ "os": "Linux-5.15.0-92-generic-x86_64-with-glibc2.35",
+ "python": "3.10.12",
+ "heartbeatAt": "2024-05-14T11:44:44.666189",
+ "startedAt": "2024-05-14T11:44:44.209643",
+ "docker": null,
+ "cuda": null,
+ "args": [
+ "--model",
+ "hf",
+ "--model_args",
+ "pretrained=/data/cronscript/ckpts//hf_ckpt//global_step100",
+ "--tasks",
+ "indiccopa-hi",
+ "--batch_size",
+ "auto",
+ "--wandb_args",
+ "project=bharatgpt,group=global_step100"
+ ],
+ "state": "running",
+ "program": "-m lm_eval.__main__",
+ "codePathLocal": null,
+ "git": {
+ "remote": "https://github.com/EleutherAI/lm-evaluation-harness",
+ "commit": null
+ },
+ "email": null,
+ "root": "/data/cronscript/lm-evaluation-harness",
+ "host": "vizzhy-150-3",
+ "username": "root",
+ "executable": "/usr/bin/python3",
+ "cpu_count": 76,
+ "cpu_count_logical": 152,
+ "cpu_freq": {
+ "current": 3384.3174802631584,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ "cpu_freq_per_core": [
+ {
+ "current": 3251.15,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3251.225,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3236.114,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3285.304,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3285.185,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3223.021,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3223.232,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3291.956,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3229.554,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3148.525,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3285.184,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3221.545,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ {
+ "current": 3400.0,
+ "min": 800.0,
+ "max": 3400.0
+ }
+ ],
+ "disk": {
+ "/": {
+ "total": 866.4415092468262,
+ "used": 77.77706527709961
+ }
+ },
+ "memory": {
+ "total": 1007.5000267028809
+ }
+}
diff --git a/venv/lib/python3.10/site-packages/nvidia/cublas/lib/libcublas.so.12 b/venv/lib/python3.10/site-packages/nvidia/cublas/lib/libcublas.so.12
new file mode 100644
index 0000000000000000000000000000000000000000..8dac305a88c17a2eb38a34edf5e89109392aeeba
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/nvidia/cublas/lib/libcublas.so.12
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:37d1141111d6b935aa84156ad61fd3575350cad8d69d0916e9db7d37eef9b816
+size 107473968
diff --git a/venv/lib/python3.10/site-packages/transformers/models/clap/modeling_clap.py b/venv/lib/python3.10/site-packages/transformers/models/clap/modeling_clap.py
new file mode 100644
index 0000000000000000000000000000000000000000..7b20b30137d2cb7cd06eb86ea2a8c72a8da1c599
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/clap/modeling_clap.py
@@ -0,0 +1,2297 @@
+# coding=utf-8
+# Copyright 2023 The LAION-AI Team and The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" PyTorch CLAP model."""
+import collections
+import math
+from dataclasses import dataclass
+from typing import Any, List, Optional, Tuple, Union
+
+import torch
+import torch.nn.functional as F
+from torch import nn
+
+from ...activations import ACT2FN
+from ...modeling_outputs import (
+ BaseModelOutputWithPastAndCrossAttentions,
+ BaseModelOutputWithPooling,
+ BaseModelOutputWithPoolingAndCrossAttentions,
+)
+from ...modeling_utils import PreTrainedModel
+from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, meshgrid, prune_linear_layer
+from ...utils import (
+ ModelOutput,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ logging,
+ replace_return_docstrings,
+)
+from .configuration_clap import ClapAudioConfig, ClapConfig, ClapTextConfig
+
+
+logger = logging.get_logger(__name__)
+
+_CHECKPOINT_FOR_DOC = "laion/clap-htsat-fused"
+
+
+from ..deprecated._archive_maps import CLAP_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
+
+
+# Adapted from: https://github.com/LAION-AI/CLAP/blob/6ad05a971ba0622f6acee8c41993e0d02bbed639/src/open_clip/utils.py#L191
+def interpolate(hidden_states, ratio):
+ """
+ Interpolate data in the time domain. This is used to compensate for the resolution reduction caused by downsampling in a CNN.
+
+ Args:
+ hidden_states (`torch.FloatTensor` of shape (batch_size, time_length, classes_num)):
+ Input hidden states
+ ratio (`int`):
+ The ratio of the length of the output to the length of the input.
+ """
+ (batch_size, time_length, classes_num) = hidden_states.shape
+ upsampled = hidden_states[:, :, None, :].repeat(1, 1, ratio, 1)
+ upsampled = upsampled.reshape(batch_size, time_length * ratio, classes_num)
+ return upsampled
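+
+# Illustrative sketch (not part of the original file): `interpolate` simply repeats every time
+# step `ratio` times, so for a hypothetical framewise score tensor:
+#
+#     frame_scores = torch.rand(2, 8, 527)            # (batch_size, time_length, classes_num)
+#     upsampled = interpolate(frame_scores, ratio=32)
+#     assert upsampled.shape == (2, 8 * 32, 527)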
+
+
+# Adapted from https://github.com/LAION-AI/CLAP/blob/6ad05a971ba0622f6acee8c41993e0d02bbed639/src/open_clip/htsat.py#L249
+def window_partition(hidden_states, window_size):
+ """
+ Returns the resized hidden states. The output shape should be `(batch_size * num_windows, window_size, window_size,
+ num_channels)`
+
+ Args:
+ hidden_states (`torch.FloatTensor` of shape `(batch_size, height, width, num_channels)`):
+ Input hidden states
+ window_size (`int`):
+ Window size
+ """
+ batch_size, height, width, num_channels = hidden_states.shape
+
+ hidden_states = hidden_states.view(
+ batch_size, height // window_size, window_size, width // window_size, window_size, num_channels
+ )
+ windows = hidden_states.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, num_channels)
+ return windows
+
+
+# Adapted from https://github.com/LAION-AI/CLAP/blob/6ad05a971ba0622f6acee8c41993e0d02bbed639/src/open_clip/htsat.py#L263
+def window_reverse(windows, window_size, height, width):
+ """
+ Merges windows to produce higher resolution features.
+ Args:
+ windows (`torch.FloatTensor` of shape `(num_windows * batch_size, window_size, window_size, num_channels)`):
+ Input windows
+ window_size (`int`):
+ Window size
+ height (`int`):
+ Height of the resized audio
+ width (`int`):
+ Width of the resized audio
+ """
+ num_channels = windows.shape[-1]
+ windows = windows.view(-1, height // window_size, width // window_size, window_size, window_size, num_channels)
+ windows = windows.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, height, width, num_channels)
+ return windows
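+
+# Illustrative sketch (not part of the original file): `window_partition` and `window_reverse`
+# are inverses of each other when height and width are multiples of `window_size`:
+#
+#     hidden = torch.rand(1, 8, 8, 96)                   # (batch_size, height, width, channels)
+#     windows = window_partition(hidden, window_size=4)  # (4 windows, 4, 4, 96)
+#     assert torch.equal(window_reverse(windows, 4, 8, 8), hidden)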
+
+
+# Copied from transformers.models.roberta.modeling_roberta.create_position_ids_from_input_ids
+def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0):
+ """
+ Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols
+ are ignored. This is modified from fairseq's `utils.make_positions`.
+
+ Args:
+ input_ids (`torch.Tensor`): Tensor of input token ids.
+ padding_idx (`int`): Index of the padding token.
+ past_key_values_length (`int`, *optional*, defaults to 0): Offset added to the position ids.
+
+ Returns: torch.Tensor
+ """
+ # The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA.
+ mask = input_ids.ne(padding_idx).int()
+ incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask
+ return incremental_indices.long() + padding_idx
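+
+# Illustrative example (not part of the original file): with padding_idx=1,
+#
+#     input_ids = torch.tensor([[5, 7, 9, 1, 1]])
+#     create_position_ids_from_input_ids(input_ids, padding_idx=1)
+#     # -> tensor([[2, 3, 4, 1, 1]]): positions start at padding_idx + 1, pads keep padding_idx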
+
+
+# contrastive loss function, adapted from
+# https://sachinruk.github.io/blog/pytorch/pytorch%20lightning/loss%20function/gpu/2021/03/07/CLIP.html#CLIP-loss-function
+def contrastive_loss(logits: torch.Tensor) -> torch.Tensor:
+ labels = torch.arange(len(logits), device=logits.device)
+ return nn.functional.cross_entropy(logits, labels)
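+
+# Illustrative sketch (assumption, not shown in this excerpt): the symmetric audio-text loss is
+# the mean of this cross-entropy over both similarity directions, roughly:
+#
+#     logits_per_text = text_embeds @ audio_embeds.t() * logit_scale
+#     loss = (contrastive_loss(logits_per_text) + contrastive_loss(logits_per_text.t())) / 2.0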
+
+
+@dataclass
+# Copied from transformers.models.clip.modeling_clip.CLIPTextModelOutput with CLIP->Clap
+class ClapTextModelOutput(ModelOutput):
+ """
+ Base class for text model's outputs that also contains a pooling of the last hidden states.
+
+ Args:
+ text_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when the model is initialized with `with_projection=True`):
+ The text embeddings obtained by applying the projection layer to the pooler_output.
+ last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
+ Sequence of hidden-states at the output of the last layer of the model.
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ """
+
+ text_embeds: Optional[torch.FloatTensor] = None
+ last_hidden_state: torch.FloatTensor = None
+ hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
+ attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
+
+
+@dataclass
+class ClapAudioModelOutput(ModelOutput):
+ """
+ ClapAudio model output to mimic the output of the original implementation.
+
+ Args:
+ audio_embeds (`torch.FloatTensor` of shape `(batch_size, hidden_size)`):
+ The Audio embeddings obtained by applying the projection layer to the pooler_output.
+ last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
+ Sequence of hidden-states at the output of the last layer of the model.
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
+ """
+
+ audio_embeds: Optional[torch.FloatTensor] = None
+ last_hidden_state: torch.FloatTensor = None
+ hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
+ attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
+
+
+@dataclass
+# Copied from transformers.models.clip.modeling_clip.CLIPOutput with CLIP->Clap, vision->audio, Vision->Audio, image->audio
+class ClapOutput(ModelOutput):
+ """
+ Args:
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`):
+ Contrastive loss for audio-text similarity.
+ logits_per_audio:(`torch.FloatTensor` of shape `(audio_batch_size, text_batch_size)`):
+ The scaled dot product scores between `audio_embeds` and `text_embeds`. This represents the audio-text
+ similarity scores.
+ logits_per_text:(`torch.FloatTensor` of shape `(text_batch_size, audio_batch_size)`):
+ The scaled dot product scores between `text_embeds` and `audio_embeds`. This represents the text-audio
+ similarity scores.
+ text_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`):
+ The text embeddings obtained by applying the projection layer to the pooled output of [`ClapTextModel`].
+ audio_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`):
+ The audio embeddings obtained by applying the projection layer to the pooled output of [`ClapAudioModel`].
+ text_model_output(`BaseModelOutputWithPooling`):
+ The output of the [`ClapTextModel`].
+ audio_model_output(`BaseModelOutputWithPooling`):
+ The output of the [`ClapAudioModel`].
+ """
+
+ loss: Optional[torch.FloatTensor] = None
+ logits_per_audio: torch.FloatTensor = None
+ logits_per_text: torch.FloatTensor = None
+ text_embeds: torch.FloatTensor = None
+ audio_embeds: torch.FloatTensor = None
+ text_model_output: BaseModelOutputWithPooling = None
+ audio_model_output: BaseModelOutputWithPooling = None
+
+ def to_tuple(self) -> Tuple[Any]:
+ return tuple(
+ self[k] if k not in ["text_model_output", "audio_model_output"] else getattr(self, k).to_tuple()
+ for k in self.keys()
+ )
+
+
+# Adapted from transformers.models.swin.modeling_swin.SwinDropPath
+class ClapDropPath(nn.Module):
+ """
+ Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). This is a slightly
+ refactored version of the `SwinDropPath` implementation.
+ """
+
+ def __init__(self, drop_prob=None):
+ super().__init__()
+ self.drop_prob = drop_prob
+
+ def forward(self, hidden_states):
+ if self.drop_prob == 0.0 or not self.training:
+ return hidden_states
+
+ keep_prob = 1 - self.drop_prob
+ # work with diff dim tensors, not just 2D ConvNets
+ shape = (hidden_states.shape[0],) + (1,) * (hidden_states.ndim - 1)
+
+ random_tensor = keep_prob + torch.rand(shape, dtype=hidden_states.dtype, device=hidden_states.device)
+ random_tensor.floor_() # binarize
+ output = hidden_states.div(keep_prob) * random_tensor
+ return output
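+
+# Illustrative note (not part of the original file): with drop_prob=0.2, training drops the
+# residual branch for roughly 20% of samples and rescales survivors by 1 / keep_prob, so the
+# expected value matches eval mode:
+#
+#     drop_path = ClapDropPath(drop_prob=0.2)
+#     drop_path.train()
+#     out = drop_path(torch.ones(4, 16, 96))  # per sample, either all zeros or all 1 / 0.8 = 1.25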
+
+
+# Adapted from https://github.com/LAION-AI/CLAP/blob/6ad05a971ba0622f6acee8c41993e0d02bbed639/src/open_clip/feature_fusion.py#L133
+class ClapAudioAFFBlock(nn.Module):
+ r"""
+ Attentional Feature Fusion block from CLAP. Since CLAP always operates in 2D mode, the 1D
+ version is not implemented.
+ """
+
+ def __init__(self, config: ClapAudioConfig):
+ super().__init__()
+ channels = config.patch_embeds_hidden_size
+ downsize_ratio = config.aff_block_r
+ inter_channels = int(channels // downsize_ratio)
+
+ self.local_att = nn.Sequential(
+ nn.Conv2d(channels, inter_channels, kernel_size=1, stride=1, padding=0),
+ nn.BatchNorm2d(inter_channels),
+ nn.ReLU(inplace=True),
+ nn.Conv2d(inter_channels, channels, kernel_size=1, stride=1, padding=0),
+ nn.BatchNorm2d(channels),
+ )
+ self.global_att = nn.Sequential(
+ nn.AdaptiveAvgPool2d(1),
+ nn.Conv2d(channels, inter_channels, kernel_size=1, stride=1, padding=0),
+ nn.BatchNorm2d(inter_channels),
+ nn.ReLU(inplace=True),
+ nn.Conv2d(inter_channels, channels, kernel_size=1, stride=1, padding=0),
+ nn.BatchNorm2d(channels),
+ )
+
+ self.sigmoid = nn.Sigmoid()
+
+ def forward(self, hidden_states, residual):
+ attention_input = hidden_states + residual
+
+ fused_layer_output = self.local_att(attention_input) + self.global_att(attention_input)
+ fused_layer_output = self.sigmoid(fused_layer_output)
+
+ output = 2 * hidden_states * fused_layer_output + 2 * residual * (1 - fused_layer_output)
+ return output
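+
+# Illustrative note (not part of the original file): the sigmoid gate blends the two branches,
+# so a gate value of 0.5 returns hidden_states + residual; the factor of 2 keeps the output
+# magnitude comparable to a plain residual sum.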
+
+
+class ClapAudioPatchEmbed(nn.Module):
+ """
+ This module converts the hidden states reshaped as an image to patch embeddings ready to be passed to the
+ Transformer block.
+ """
+
+ def __init__(self, config: ClapAudioConfig):
+ super().__init__()
+ img_size = (config.spec_size, config.spec_size) if isinstance(config.spec_size, int) else config.spec_size
+ patch_size = (
+ (config.patch_size, config.patch_size) if isinstance(config.patch_size, int) else config.patch_size
+ )
+ patch_stride = (
+ (config.patch_stride, config.patch_stride) if isinstance(config.patch_stride, int) else config.patch_stride
+ )
+
+ self.img_size = img_size
+ self.patch_stride = patch_stride
+
+ self.grid_size = (img_size[0] // patch_stride[0], img_size[1] // patch_stride[1])
+ self.num_patches = self.grid_size[0] * self.grid_size[1]
+
+ self.flatten = config.flatten_patch_embeds
+ self.enable_fusion = config.enable_fusion
+
+ padding = ((patch_size[0] - patch_stride[0]) // 2, (patch_size[1] - patch_stride[1]) // 2)
+
+ scale_factor = 4 if (self.enable_fusion) and (config.fusion_type == "channel_map") else 1
+
+ self.proj = nn.Conv2d(
+ config.patch_embed_input_channels * scale_factor,
+ config.patch_embeds_hidden_size,
+ kernel_size=patch_size,
+ stride=patch_stride,
+ padding=padding,
+ )
+
+ self.norm = nn.LayerNorm(config.patch_embeds_hidden_size) if config.enable_patch_layer_norm else nn.Identity()
+ if self.enable_fusion:
+ self.fusion_model = ClapAudioAFFBlock(config)
+ self.mel_conv2d = nn.Conv2d(
+ config.patch_embed_input_channels,
+ config.patch_embeds_hidden_size,
+ kernel_size=(patch_size[0], patch_size[1] * 3),
+ stride=(patch_stride[0], patch_stride[1] * 3),
+ padding=padding,
+ )
+
+ def forward(self, hidden_states, is_longer_idx=None):
+ if self.enable_fusion:
+ # retrieve the last mel as we have transposed the input
+ global_hidden_states = hidden_states[:, 0:1, :, :]
+
+ # global processing
+ batch_size, num_channels, height, width = global_hidden_states.shape
+
+ if height != self.img_size[0] or width != self.img_size[1]:
+ raise ValueError(
+ f"Input audio size ({height}*{width}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
+ )
+
+ global_hidden_states = self.proj(global_hidden_states)
+ output_width = global_hidden_states.size(-1)
+ if len(is_longer_idx) > 0:
+ # local processing
+ local_hidden_states = hidden_states[is_longer_idx, 1:, :, :].contiguous()
+ batch_size, num_channels, height, width = local_hidden_states.shape
+ local_hidden_states = local_hidden_states.view(batch_size * num_channels, 1, height, width)
+
+ local_hidden_states = self.mel_conv2d(local_hidden_states)
+
+ _, features, height, width = local_hidden_states.shape
+ local_hidden_states = local_hidden_states.view(batch_size, num_channels, features, height, width)
+ local_hidden_states = local_hidden_states.permute((0, 2, 3, 1, 4)).contiguous().flatten(3)
+
+ local_width = local_hidden_states.size(-1)
+ local_hidden_states = torch.nn.functional.pad(
+ local_hidden_states, (0, output_width - local_width), "constant", 0
+ )
+
+ global_hidden_states[is_longer_idx] = self.fusion_model(
+ global_hidden_states[is_longer_idx], local_hidden_states
+ )
+ hidden_states = global_hidden_states
+ else:
+ _, _, height, width = hidden_states.shape
+ if height != self.img_size[0] or width != self.img_size[1]:
+ raise ValueError(
+ f"Input audio size ({height}*{width}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
+ )
+ hidden_states = self.proj(hidden_states)
+
+ if self.flatten:
+ hidden_states = hidden_states.flatten(2).transpose(1, 2)
+ hidden_states = self.norm(hidden_states)
+ return hidden_states
+
+
+# Copied from transformers.models.swin.modeling_swin.SwinSelfAttention with Swin->ClapAudio
+class ClapAudioSelfAttention(nn.Module):
+ def __init__(self, config, dim, num_heads, window_size):
+ super().__init__()
+ if dim % num_heads != 0:
+ raise ValueError(
+ f"The hidden size ({dim}) is not a multiple of the number of attention heads ({num_heads})"
+ )
+
+ self.num_attention_heads = num_heads
+ self.attention_head_size = int(dim / num_heads)
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
+ self.window_size = (
+ window_size if isinstance(window_size, collections.abc.Iterable) else (window_size, window_size)
+ )
+
+ self.relative_position_bias_table = nn.Parameter(
+ torch.zeros((2 * self.window_size[0] - 1) * (2 * self.window_size[1] - 1), num_heads)
+ )
+
+ # get pair-wise relative position index for each token inside the window
+ coords_h = torch.arange(self.window_size[0])
+ coords_w = torch.arange(self.window_size[1])
+ coords = torch.stack(meshgrid([coords_h, coords_w], indexing="ij"))
+ coords_flatten = torch.flatten(coords, 1)
+ relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :]
+ relative_coords = relative_coords.permute(1, 2, 0).contiguous()
+ relative_coords[:, :, 0] += self.window_size[0] - 1
+ relative_coords[:, :, 1] += self.window_size[1] - 1
+ relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
+ relative_position_index = relative_coords.sum(-1)
+ self.register_buffer("relative_position_index", relative_position_index)
+
+ self.query = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias)
+ self.key = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias)
+ self.value = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias)
+
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
+
+ def transpose_for_scores(self, x):
+ new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
+ x = x.view(new_x_shape)
+ return x.permute(0, 2, 1, 3)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ output_attentions: Optional[bool] = False,
+ ) -> Tuple[torch.Tensor]:
+ batch_size, dim, num_channels = hidden_states.shape
+ mixed_query_layer = self.query(hidden_states)
+
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
+ query_layer = self.transpose_for_scores(mixed_query_layer)
+
+ # Take the dot product between "query" and "key" to get the raw attention scores.
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
+
+ attention_scores = attention_scores / math.sqrt(self.attention_head_size)
+
+ relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)]
+ relative_position_bias = relative_position_bias.view(
+ self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1
+ )
+
+ relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous()
+ attention_scores = attention_scores + relative_position_bias.unsqueeze(0)
+
+ if attention_mask is not None:
+ # Apply the attention mask (precomputed for all layers in the ClapAudioModel forward() function)
+ mask_shape = attention_mask.shape[0]
+ attention_scores = attention_scores.view(
+ batch_size // mask_shape, mask_shape, self.num_attention_heads, dim, dim
+ )
+ attention_scores = attention_scores + attention_mask.unsqueeze(1).unsqueeze(0)
+ attention_scores = attention_scores.view(-1, self.num_attention_heads, dim, dim)
+
+ # Normalize the attention scores to probabilities.
+ attention_probs = nn.functional.softmax(attention_scores, dim=-1)
+
+ # This is actually dropping out entire tokens to attend to, which might
+ # seem a bit unusual, but is taken from the original Transformer paper.
+ attention_probs = self.dropout(attention_probs)
+
+ # Mask heads if we want to
+ if head_mask is not None:
+ attention_probs = attention_probs * head_mask
+
+ context_layer = torch.matmul(attention_probs, value_layer)
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
+ new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
+ context_layer = context_layer.view(new_context_layer_shape)
+
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
+
+ return outputs
+
+
+# Copied from transformers.models.swin.modeling_swin.SwinSelfOutput with Swin->ClapAudio
+class ClapAudioSelfOutput(nn.Module):
+ def __init__(self, config, dim):
+ super().__init__()
+ self.dense = nn.Linear(dim, dim)
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
+
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+
+ return hidden_states
+
+
+# Copied from transformers.models.swin.modeling_swin.SwinAttention with Swin->ClapAudio
+class ClapAudioAttention(nn.Module):
+ def __init__(self, config, dim, num_heads, window_size):
+ super().__init__()
+ self.self = ClapAudioSelfAttention(config, dim, num_heads, window_size)
+ self.output = ClapAudioSelfOutput(config, dim)
+ self.pruned_heads = set()
+
+ def prune_heads(self, heads):
+ if len(heads) == 0:
+ return
+ heads, index = find_pruneable_heads_and_indices(
+ heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
+ )
+
+ # Prune linear layers
+ self.self.query = prune_linear_layer(self.self.query, index)
+ self.self.key = prune_linear_layer(self.self.key, index)
+ self.self.value = prune_linear_layer(self.self.value, index)
+ self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
+
+ # Update hyper params and store pruned heads
+ self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
+ self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
+ self.pruned_heads = self.pruned_heads.union(heads)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ output_attentions: Optional[bool] = False,
+ ) -> Tuple[torch.Tensor]:
+ self_outputs = self.self(hidden_states, attention_mask, head_mask, output_attentions)
+ attention_output = self.output(self_outputs[0], hidden_states)
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
+ return outputs
+
+
+# Copied from transformers.models.swin.modeling_swin.SwinIntermediate with Swin->ClapAudio
+class ClapAudioIntermediate(nn.Module):
+ def __init__(self, config, dim):
+ super().__init__()
+ self.dense = nn.Linear(dim, int(config.mlp_ratio * dim))
+ if isinstance(config.hidden_act, str):
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
+ else:
+ self.intermediate_act_fn = config.hidden_act
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.intermediate_act_fn(hidden_states)
+ return hidden_states
+
+
+# Copied from transformers.models.swin.modeling_swin.SwinOutput with Swin->ClapAudio
+class ClapAudioOutput(nn.Module):
+ def __init__(self, config, dim):
+ super().__init__()
+ self.dense = nn.Linear(int(config.mlp_ratio * dim), dim)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+ return hidden_states
+
+
+# Copied from transformers.models.swin.modeling_swin.SwinLayer with SwinDropPath->ClapDropPath, Swin->ClapAudio
+class ClapAudioLayer(nn.Module):
+ def __init__(self, config, dim, input_resolution, num_heads, shift_size=0):
+ super().__init__()
+ self.chunk_size_feed_forward = config.chunk_size_feed_forward
+ self.shift_size = shift_size
+ self.window_size = config.window_size
+ self.input_resolution = input_resolution
+ self.layernorm_before = nn.LayerNorm(dim, eps=config.layer_norm_eps)
+ self.attention = ClapAudioAttention(config, dim, num_heads, window_size=self.window_size)
+ self.drop_path = ClapDropPath(config.drop_path_rate) if config.drop_path_rate > 0.0 else nn.Identity()
+ self.layernorm_after = nn.LayerNorm(dim, eps=config.layer_norm_eps)
+ self.intermediate = ClapAudioIntermediate(config, dim)
+ self.output = ClapAudioOutput(config, dim)
+
+ def set_shift_and_window_size(self, input_resolution):
+ if min(input_resolution) <= self.window_size:
+ # if window size is larger than input resolution, we don't partition windows
+ self.shift_size = 0
+ self.window_size = min(input_resolution)
+
+ def get_attn_mask(self, height, width, dtype):
+ if self.shift_size > 0:
+ # calculate attention mask for SW-MSA
+ img_mask = torch.zeros((1, height, width, 1), dtype=dtype)
+ height_slices = (
+ slice(0, -self.window_size),
+ slice(-self.window_size, -self.shift_size),
+ slice(-self.shift_size, None),
+ )
+ width_slices = (
+ slice(0, -self.window_size),
+ slice(-self.window_size, -self.shift_size),
+ slice(-self.shift_size, None),
+ )
+ count = 0
+ for height_slice in height_slices:
+ for width_slice in width_slices:
+ img_mask[:, height_slice, width_slice, :] = count
+ count += 1
+
+ mask_windows = window_partition(img_mask, self.window_size)
+ mask_windows = mask_windows.view(-1, self.window_size * self.window_size)
+ attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
+ attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))
+ else:
+ attn_mask = None
+ return attn_mask
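+
+ # Illustrative note (not part of the original file): for height = width = 8, window_size = 4 and
+ # shift_size = 2, the 3 height slices x 3 width slices assign one of 9 region ids per position;
+ # token pairs from different regions get a -100.0 bias, so shifted windows cannot attend across
+ # region boundaries.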
+
+ def maybe_pad(self, hidden_states, height, width):
+ pad_right = (self.window_size - width % self.window_size) % self.window_size
+ pad_bottom = (self.window_size - height % self.window_size) % self.window_size
+ pad_values = (0, 0, 0, pad_right, 0, pad_bottom)
+ hidden_states = nn.functional.pad(hidden_states, pad_values)
+ return hidden_states, pad_values
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ input_dimensions: Tuple[int, int],
+ head_mask: Optional[torch.FloatTensor] = None,
+ output_attentions: Optional[bool] = False,
+ always_partition: Optional[bool] = False,
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
+ if not always_partition:
+ self.set_shift_and_window_size(input_dimensions)
+ else:
+ pass
+ height, width = input_dimensions
+ batch_size, _, channels = hidden_states.size()
+ shortcut = hidden_states
+
+ hidden_states = self.layernorm_before(hidden_states)
+
+ hidden_states = hidden_states.view(batch_size, height, width, channels)
+
+ # pad hidden_states to multiples of window size
+ hidden_states, pad_values = self.maybe_pad(hidden_states, height, width)
+
+ _, height_pad, width_pad, _ = hidden_states.shape
+ # cyclic shift
+ if self.shift_size > 0:
+ shifted_hidden_states = torch.roll(hidden_states, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))
+ else:
+ shifted_hidden_states = hidden_states
+
+ # partition windows
+ hidden_states_windows = window_partition(shifted_hidden_states, self.window_size)
+ hidden_states_windows = hidden_states_windows.view(-1, self.window_size * self.window_size, channels)
+ attn_mask = self.get_attn_mask(height_pad, width_pad, dtype=hidden_states.dtype)
+ if attn_mask is not None:
+ attn_mask = attn_mask.to(hidden_states_windows.device)
+
+ attention_outputs = self.attention(
+ hidden_states_windows, attn_mask, head_mask, output_attentions=output_attentions
+ )
+
+ attention_output = attention_outputs[0]
+
+ attention_windows = attention_output.view(-1, self.window_size, self.window_size, channels)
+ shifted_windows = window_reverse(attention_windows, self.window_size, height_pad, width_pad)
+
+ # reverse cyclic shift
+ if self.shift_size > 0:
+ attention_windows = torch.roll(shifted_windows, shifts=(self.shift_size, self.shift_size), dims=(1, 2))
+ else:
+ attention_windows = shifted_windows
+
+ was_padded = pad_values[3] > 0 or pad_values[5] > 0
+ if was_padded:
+ attention_windows = attention_windows[:, :height, :width, :].contiguous()
+
+ attention_windows = attention_windows.view(batch_size, height * width, channels)
+
+ hidden_states = shortcut + self.drop_path(attention_windows)
+
+ layer_output = self.layernorm_after(hidden_states)
+ layer_output = self.intermediate(layer_output)
+ layer_output = hidden_states + self.output(layer_output)
+
+ layer_outputs = (layer_output, attention_outputs[1]) if output_attentions else (layer_output,)
+ return layer_outputs
+
+
+# Copied from transformers.models.swin.modeling_swin.SwinStage with Swin->ClapAudio
+class ClapAudioStage(nn.Module):
+ def __init__(self, config, dim, input_resolution, depth, num_heads, drop_path, downsample):
+ super().__init__()
+ self.config = config
+ self.dim = dim
+ self.blocks = nn.ModuleList(
+ [
+ ClapAudioLayer(
+ config=config,
+ dim=dim,
+ input_resolution=input_resolution,
+ num_heads=num_heads,
+ shift_size=0 if (i % 2 == 0) else config.window_size // 2,
+ )
+ for i in range(depth)
+ ]
+ )
+
+ # patch merging layer
+ if downsample is not None:
+ self.downsample = downsample(input_resolution, dim=dim, norm_layer=nn.LayerNorm)
+ else:
+ self.downsample = None
+
+ self.pointing = False
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ input_dimensions: Tuple[int, int],
+ head_mask: Optional[torch.FloatTensor] = None,
+ output_attentions: Optional[bool] = False,
+ always_partition: Optional[bool] = False,
+ ) -> Tuple[torch.Tensor]:
+ height, width = input_dimensions
+ for i, layer_module in enumerate(self.blocks):
+ layer_head_mask = head_mask[i] if head_mask is not None else None
+
+ layer_outputs = layer_module(
+ hidden_states, input_dimensions, layer_head_mask, output_attentions, always_partition
+ )
+
+ hidden_states = layer_outputs[0]
+
+ hidden_states_before_downsampling = hidden_states
+ if self.downsample is not None:
+ height_downsampled, width_downsampled = (height + 1) // 2, (width + 1) // 2
+ output_dimensions = (height, width, height_downsampled, width_downsampled)
+ hidden_states = self.downsample(hidden_states_before_downsampling, input_dimensions)
+ else:
+ output_dimensions = (height, width, height, width)
+
+ stage_outputs = (hidden_states, hidden_states_before_downsampling, output_dimensions)
+
+ if output_attentions:
+ stage_outputs += layer_outputs[1:]
+ return stage_outputs
+
+
+# Copied from transformers.models.swin.modeling_swin.SwinPatchMerging with Swin->ClapAudio
+class ClapAudioPatchMerging(nn.Module):
+ """
+ Patch Merging Layer.
+
+ Args:
+ input_resolution (`Tuple[int]`):
+ Resolution of input feature.
+ dim (`int`):
+ Number of input channels.
+ norm_layer (`nn.Module`, *optional*, defaults to `nn.LayerNorm`):
+ Normalization layer class.
+ """
+
+ def __init__(self, input_resolution: Tuple[int], dim: int, norm_layer: nn.Module = nn.LayerNorm) -> None:
+ super().__init__()
+ self.input_resolution = input_resolution
+ self.dim = dim
+ self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False)
+ self.norm = norm_layer(4 * dim)
+
+ def maybe_pad(self, input_feature, height, width):
+ should_pad = (height % 2 == 1) or (width % 2 == 1)
+ if should_pad:
+ pad_values = (0, 0, 0, width % 2, 0, height % 2)
+ input_feature = nn.functional.pad(input_feature, pad_values)
+
+ return input_feature
+
+ def forward(self, input_feature: torch.Tensor, input_dimensions: Tuple[int, int]) -> torch.Tensor:
+ height, width = input_dimensions
+ # `dim` is height * width
+ batch_size, dim, num_channels = input_feature.shape
+
+ input_feature = input_feature.view(batch_size, height, width, num_channels)
+ # pad input so that height and width are divisible by 2, if needed
+ input_feature = self.maybe_pad(input_feature, height, width)
+ # [batch_size, height/2, width/2, num_channels]
+ input_feature_0 = input_feature[:, 0::2, 0::2, :]
+ # [batch_size, height/2, width/2, num_channels]
+ input_feature_1 = input_feature[:, 1::2, 0::2, :]
+ # [batch_size, height/2, width/2, num_channels]
+ input_feature_2 = input_feature[:, 0::2, 1::2, :]
+ # [batch_size, height/2, width/2, num_channels]
+ input_feature_3 = input_feature[:, 1::2, 1::2, :]
+ # batch_size height/2 width/2 4*num_channels
+ input_feature = torch.cat([input_feature_0, input_feature_1, input_feature_2, input_feature_3], -1)
+ input_feature = input_feature.view(batch_size, -1, 4 * num_channels) # batch_size height/2*width/2 4*C
+
+ input_feature = self.norm(input_feature)
+ input_feature = self.reduction(input_feature)
+
+ return input_feature
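+
+# Illustrative sketch (not part of the original file): patch merging halves each spatial
+# dimension and doubles the channel dimension:
+#
+#     merge = ClapAudioPatchMerging(input_resolution=(8, 8), dim=96)
+#     tokens = torch.rand(2, 8 * 8, 96)
+#     assert merge(tokens, (8, 8)).shape == (2, 4 * 4, 2 * 96)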
+
+
+class ClapAudioEncoder(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.num_layers = len(config.depths)
+
+ self.config = config
+ self.patch_embed = ClapAudioPatchEmbed(config)
+ self.enable_fusion = config.enable_fusion
+ self.patch_stride = self.patch_embed.patch_stride
+ self.spec_size = config.spec_size
+ self.freq_ratio = config.spec_size // config.num_mel_bins
+
+ self.num_features = int(config.patch_embeds_hidden_size * 2 ** (self.num_layers - 1))
+
+ drop_path_rate = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths))]
+
+ grid_size = self.patch_embed.grid_size
+ self.input_resolutions = [(grid_size[0] // (2**i), grid_size[1] // (2**i)) for i in range(self.num_layers)]
+
+ self.layers = nn.ModuleList(
+ [
+ ClapAudioStage(
+ config=config,
+ dim=int(config.patch_embeds_hidden_size * 2**i_layer),
+ input_resolution=self.input_resolutions[i_layer],
+ depth=config.depths[i_layer],
+ num_heads=config.num_attention_heads[i_layer],
+ drop_path=drop_path_rate[sum(config.depths[:i_layer]) : sum(config.depths[: i_layer + 1])],
+ downsample=ClapAudioPatchMerging if (i_layer < self.num_layers - 1) else None,
+ )
+ for i_layer in range(self.num_layers)
+ ]
+ )
+
+ self.gradient_checkpointing = False
+
+ self.batch_norm = nn.BatchNorm2d(config.num_mel_bins)
+ self.norm = nn.LayerNorm(self.num_features)
+ self.depths = config.depths
+ self.avgpool = nn.AdaptiveAvgPool1d(1)
+
+ def reshape_mel2img(self, normalized_input_features):
+ """
+ The input is 4 normalized log mel spectrograms. It is reshaped to the common image shape. Each channel
+ should represent 1 of the 4 crops of the spectrogram. For more details, refer to the [`ClapFeatureExtractor`].
+ """
+ _, _, time_length, freq_length = normalized_input_features.shape
+
+ spec_width = int(self.spec_size * self.freq_ratio)
+ spec_height = self.spec_size // self.freq_ratio
+
+ if time_length > spec_width or freq_length > spec_height:
+ raise ValueError("the wav size should be less than or equal to the swin input size")
+
+ # to avoid bicubic zero error
+ if time_length < spec_width:
+ normalized_input_features = nn.functional.interpolate(
+ normalized_input_features, (spec_width, freq_length), mode="bicubic", align_corners=True
+ )
+ if freq_length < spec_height:
+ normalized_input_features = nn.functional.interpolate(
+ normalized_input_features, (time_length, spec_height), mode="bicubic", align_corners=True
+ )
+
+ batch, channels, time, freq = normalized_input_features.shape
+
+ # batch_size, channels, spec_width, spec_height --> batch_size, channels, spec_height * freq_ratio, spec_width // freq_ratio
+ normalized_input_features = normalized_input_features.reshape(
+ batch, channels * self.freq_ratio, time // self.freq_ratio, freq
+ )
+ normalized_input_features = normalized_input_features.permute(0, 1, 3, 2).contiguous()
+ normalized_input_features = normalized_input_features.reshape(
+ batch, channels, freq * self.freq_ratio, time // self.freq_ratio
+ )
+
+ return normalized_input_features
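+
+ # Illustrative note (not part of the original file): with spec_size=256 and num_mel_bins=64
+ # (freq_ratio=4), a (batch_size, 1, 1024, 64) mel input is reshaped by reshape_mel2img into a
+ # square (batch_size, 1, 256, 256) "image" before patch embedding.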
+
+ def forward(
+ self,
+ input_features,
+ is_longer: Optional[torch.FloatTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ output_attentions: Optional[bool] = False,
+ output_hidden_states: Optional[bool] = False,
+ output_hidden_states_before_downsampling: Optional[bool] = False,
+ always_partition: Optional[bool] = False,
+ return_dict: Optional[bool] = True,
+ ) -> Union[Tuple, ClapAudioModelOutput]:
+ input_features = input_features.transpose(1, 3)
+ normalized_input_features = self.batch_norm(input_features)
+ normalized_input_features = normalized_input_features.transpose(1, 3)
+
+ is_longer_list_idx = None
+ if self.enable_fusion:
+ is_longer_list = is_longer.to(input_features.device)
+ is_longer_list_idx = torch.where(is_longer_list == 1)[0]
+
+ hidden_states = self.reshape_mel2img(normalized_input_features)
+
+ frames_num = hidden_states.shape[2]
+
+ hidden_states = self.patch_embed(hidden_states, is_longer_list_idx)
+
+ all_hidden_states = () if output_hidden_states else None
+ all_reshaped_hidden_states = () if output_hidden_states else None
+ all_self_attentions = () if output_attentions else None
+
+ input_dimensions = self.input_resolutions[0]
+
+ if output_hidden_states:
+ batch_size, _, hidden_size = hidden_states.shape
+ # rearrange batch_size (height width) channels -> batch_size channel height width
+ reshaped_hidden_state = hidden_states.view(batch_size, *input_dimensions, hidden_size)
+ reshaped_hidden_state = reshaped_hidden_state.permute(0, 3, 1, 2)
+ all_hidden_states += (hidden_states,)
+ all_reshaped_hidden_states += (reshaped_hidden_state,)
+
+ for i, layer_module in enumerate(self.layers):
+ layer_head_mask = head_mask[i] if head_mask is not None else None
+
+ input_dimensions = self.input_resolutions[i]
+
+ if self.gradient_checkpointing and self.training:
+ layer_outputs = self._gradient_checkpointing_func(
+ layer_module.__call__, hidden_states, input_dimensions, layer_head_mask, output_attentions
+ )
+ else:
+ layer_outputs = layer_module(
+ hidden_states, input_dimensions, layer_head_mask, output_attentions, always_partition
+ )
+
+ hidden_states = layer_outputs[0]
+
+ hidden_states_before_downsampling = layer_outputs[1]
+ output_dimensions = layer_outputs[2]
+
+ input_dimensions = (output_dimensions[-2], output_dimensions[-1])
+
+ if output_hidden_states and output_hidden_states_before_downsampling:
+ batch_size, _, hidden_size = hidden_states_before_downsampling.shape
+ # rearrange batch_size (height width) channels -> batch_size channel height width
+ # here we use the original (not downsampled) height and width
+ reshaped_hidden_state = hidden_states_before_downsampling.view(
+ batch_size, *(output_dimensions[0], output_dimensions[1]), hidden_size
+ )
+ reshaped_hidden_state = reshaped_hidden_state.permute(0, 3, 1, 2)
+ all_hidden_states += (hidden_states_before_downsampling,)
+ all_reshaped_hidden_states += (reshaped_hidden_state,)
+ elif output_hidden_states and not output_hidden_states_before_downsampling:
+ batch_size, _, hidden_size = hidden_states.shape
+ # rearrange batch_size (height width) channels -> batch_size channel height width
+ reshaped_hidden_state = hidden_states.view(batch_size, *input_dimensions, hidden_size)
+ reshaped_hidden_state = reshaped_hidden_state.permute(0, 3, 1, 2)
+ all_hidden_states += (hidden_states,)
+ all_reshaped_hidden_states += (reshaped_hidden_state,)
+
+ if output_attentions:
+ all_self_attentions += layer_outputs[3:]
+
+ last_hidden_state = self.norm(hidden_states)
+
+ batch_size, _, n_channels = last_hidden_state.shape
+
+ freq_shape = frames_num // (2 ** (len(self.depths) - 1)) // self.patch_stride[0]
+ temporal_shape = frames_num // (2 ** (len(self.depths) - 1)) // self.patch_stride[1]
+
+ last_hidden_state = (
+ last_hidden_state.permute(0, 2, 1).contiguous().reshape(batch_size, n_channels, freq_shape, temporal_shape)
+ )
+
+ batch_size, n_channels, n_frequencies, n_temp = last_hidden_state.shape
+ # group 2D CNN
+ c_freq_bin = n_frequencies // self.freq_ratio
+ last_hidden_state = last_hidden_state.reshape(
+ batch_size, n_channels, n_frequencies // c_freq_bin, c_freq_bin, n_temp
+ )
+ last_hidden_state = (
+ last_hidden_state.permute(0, 1, 3, 2, 4).contiguous().reshape(batch_size, n_channels, c_freq_bin, -1)
+ )
+ latent_output = self.avgpool(torch.flatten(last_hidden_state, 2))
+ latent_output = torch.flatten(latent_output, 1)
+
+ if not return_dict:
+ return tuple(
+ v
+ for v in [
+ last_hidden_state,
+ latent_output,
+ all_reshaped_hidden_states,
+ all_self_attentions,
+ ]
+ if v is not None
+ )
+
+ return BaseModelOutputWithPooling(
+ last_hidden_state=last_hidden_state,
+ pooler_output=latent_output,
+ hidden_states=all_reshaped_hidden_states,
+ attentions=all_self_attentions,
+ )
+
+
+CLAP_START_DOCSTRING = r"""
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
+ library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
+ etc.)
+
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
+ and behavior.
+
+ Parameters:
+ config ([`ClapConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+CLAP_TEXT_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
+ it.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
+ config.max_position_embeddings - 1]`.
+
+ [What are position IDs?](../glossary#position-ids)
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+CLAP_AUDIO_INPUTS_DOCSTRING = r"""
+ Args:
+ input_features (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
+ Input audio features. These should be returned by the [`ClapFeatureExtractor`] class, which you can also
+ retrieve from [`AutoFeatureExtractor`]. See [`ClapFeatureExtractor.__call__`] for details.
+ is_longer (`torch.BoolTensor` of shape `(batch_size, 1)`, *optional*):
+ Whether the audio clip is longer than `max_length`. If `True`, a feature fusion will be enabled to enhance
+ the features.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+CLAP_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
+ it.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
+ config.max_position_embeddings - 1]`.
+
+ [What are position IDs?](../glossary#position-ids)
+ input_features (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
+ Input audio features. These should be returned by the [`ClapFeatureExtractor`] class, which you can also
+ retrieve from [`AutoFeatureExtractor`]. See [`ClapFeatureExtractor.__call__`] for details.
+ return_loss (`bool`, *optional*):
+ Whether or not to return the contrastive loss.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+class ClapProjectionLayer(nn.Module):
+ def __init__(self, config: Union[ClapAudioConfig, ClapTextConfig]):
+ super().__init__()
+ self.config = config
+ hidden_size = config.hidden_size
+ projection_dim = config.projection_dim
+
+ self.linear1 = nn.Linear(hidden_size, projection_dim)
+ self.activation = ACT2FN[config.projection_hidden_act]
+ self.linear2 = nn.Linear(projection_dim, projection_dim)
+
+ def forward(self, hidden_states):
+ hidden_states = self.linear1(hidden_states)
+ hidden_states = self.activation(hidden_states)
+ hidden_states = self.linear2(hidden_states)
+ return hidden_states
+
+
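`ClapProjectionLayer` is a small two-layer MLP that maps a pooled encoder output of size `hidden_size` into the shared `projection_dim` space used for the contrastive comparison. A hedged stand-alone equivalent (the sizes and the ReLU activation are illustrative assumptions; the real layer reads both from the config via `ACT2FN[config.projection_hidden_act]`):

```python
import torch
from torch import nn

hidden_size, projection_dim = 768, 512  # illustrative sizes, not the checkpoint's

projection = nn.Sequential(
    nn.Linear(hidden_size, projection_dim),
    nn.ReLU(),  # stand-in for ACT2FN[config.projection_hidden_act]
    nn.Linear(projection_dim, projection_dim),
)

pooled_output = torch.randn(4, hidden_size)  # e.g. a pooled text or audio embedding
embeddings = projection(pooled_output)
print(embeddings.shape)  # torch.Size([4, 512])
```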
+# Copied from transformers.models.roberta.modeling_roberta.RobertaEmbeddings with Roberta->ClapText, persistent=False->persistent=True
+class ClapTextEmbeddings(nn.Module):
+ """
+ Same as BertEmbeddings with a tiny tweak for positional embeddings indexing.
+ """
+
+ # Copied from transformers.models.bert.modeling_bert.BertEmbeddings.__init__
+ def __init__(self, config):
+ super().__init__()
+ self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
+ self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
+ self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
+
+ # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
+ # any TensorFlow checkpoint file
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+ # position_ids (1, len position emb) is contiguous in memory and exported when serialized
+ self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
+ self.register_buffer(
+ "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=True
+ )
+ self.register_buffer(
+ "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=True
+ )
+
+ # End copy
+ self.padding_idx = config.pad_token_id
+ self.position_embeddings = nn.Embedding(
+ config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx
+ )
+
+ def forward(
+ self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0
+ ):
+ if position_ids is None:
+ if input_ids is not None:
+ # Create the position ids from the input token ids. Any padded tokens remain padded.
+ position_ids = create_position_ids_from_input_ids(input_ids, self.padding_idx, past_key_values_length)
+ else:
+ position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds)
+
+ if input_ids is not None:
+ input_shape = input_ids.size()
+ else:
+ input_shape = inputs_embeds.size()[:-1]
+
+ seq_length = input_shape[1]
+
+ # Set token_type_ids to the registered buffer from the constructor, where it is all zeros. This usually occurs
+ # when token_type_ids are auto-generated; the registered buffer helps users trace the model without passing
+ # token_type_ids, and solves issue #5664
+ if token_type_ids is None:
+ if hasattr(self, "token_type_ids"):
+ buffered_token_type_ids = self.token_type_ids[:, :seq_length]
+ buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length)
+ token_type_ids = buffered_token_type_ids_expanded
+ else:
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
+
+ if inputs_embeds is None:
+ inputs_embeds = self.word_embeddings(input_ids)
+ token_type_embeddings = self.token_type_embeddings(token_type_ids)
+
+ embeddings = inputs_embeds + token_type_embeddings
+ if self.position_embedding_type == "absolute":
+ position_embeddings = self.position_embeddings(position_ids)
+ embeddings += position_embeddings
+ embeddings = self.LayerNorm(embeddings)
+ embeddings = self.dropout(embeddings)
+ return embeddings
+
+ def create_position_ids_from_inputs_embeds(self, inputs_embeds):
+ """
+ We are provided embeddings directly. We cannot infer which tokens are padded, so we just generate sequential position ids.
+
+ Args:
+ inputs_embeds: torch.Tensor
+
+ Returns: torch.Tensor
+ """
+ input_shape = inputs_embeds.size()[:-1]
+ sequence_length = input_shape[1]
+
+ position_ids = torch.arange(
+ self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device
+ )
+ return position_ids.unsqueeze(0).expand(input_shape)
+
+
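The embeddings above rely on `create_position_ids_from_input_ids`, the RoBERTa-style helper that skips padding when numbering positions. A sketch of what that helper computes, assuming the standard behavior (pad tokens keep `padding_idx`, real tokens count up from `padding_idx + 1`) and ignoring `past_key_values_length`:

```python
import torch

def position_ids_from_input_ids(input_ids: torch.Tensor, padding_idx: int) -> torch.Tensor:
    # Non-pad tokens get positions padding_idx+1, padding_idx+2, ...;
    # pad tokens keep padding_idx so they hit the padding row of the embedding table.
    mask = input_ids.ne(padding_idx).int()
    incremental_indices = torch.cumsum(mask, dim=1) * mask
    return incremental_indices.long() + padding_idx

input_ids = torch.tensor([[0, 31414, 232, 2, 1, 1]])  # 1 is the RoBERTa pad id (assumed)
print(position_ids_from_input_ids(input_ids, padding_idx=1))
# tensor([[2, 3, 4, 5, 1, 1]])
```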
+# Copied from transformers.models.bert.modeling_bert.BertSelfAttention with Bert->ClapText
+class ClapTextSelfAttention(nn.Module):
+ def __init__(self, config, position_embedding_type=None):
+ super().__init__()
+ if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
+ raise ValueError(
+ f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
+ f"heads ({config.num_attention_heads})"
+ )
+
+ self.num_attention_heads = config.num_attention_heads
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
+
+ self.query = nn.Linear(config.hidden_size, self.all_head_size)
+ self.key = nn.Linear(config.hidden_size, self.all_head_size)
+ self.value = nn.Linear(config.hidden_size, self.all_head_size)
+
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
+ self.position_embedding_type = position_embedding_type or getattr(
+ config, "position_embedding_type", "absolute"
+ )
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
+ self.max_position_embeddings = config.max_position_embeddings
+ self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
+
+ self.is_decoder = config.is_decoder
+
+ def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor:
+ new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
+ x = x.view(new_x_shape)
+ return x.permute(0, 2, 1, 3)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ output_attentions: Optional[bool] = False,
+ ) -> Tuple[torch.Tensor]:
+ mixed_query_layer = self.query(hidden_states)
+
+ # If this is instantiated as a cross-attention module, the keys
+ # and values come from an encoder; the attention mask needs to be
+ # such that the encoder's padding tokens are not attended to.
+ is_cross_attention = encoder_hidden_states is not None
+
+ if is_cross_attention and past_key_value is not None:
+ # reuse k,v, cross_attentions
+ key_layer = past_key_value[0]
+ value_layer = past_key_value[1]
+ attention_mask = encoder_attention_mask
+ elif is_cross_attention:
+ key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
+ value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
+ attention_mask = encoder_attention_mask
+ elif past_key_value is not None:
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
+ key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
+ value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
+ else:
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
+
+ query_layer = self.transpose_for_scores(mixed_query_layer)
+
+ use_cache = past_key_value is not None
+ if self.is_decoder:
+ # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
+ # Further calls to cross_attention layer can then reuse all cross-attention
+ # key/value_states (first "if" case)
+ # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
+ past_key_value = (key_layer, value_layer)
+
+ # Take the dot product between "query" and "key" to get the raw attention scores.
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
+
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
+ query_length, key_length = query_layer.shape[2], key_layer.shape[2]
+ if use_cache:
+ position_ids_l = torch.tensor(key_length - 1, dtype=torch.long, device=hidden_states.device).view(
+ -1, 1
+ )
+ else:
+ position_ids_l = torch.arange(query_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
+ position_ids_r = torch.arange(key_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
+ distance = position_ids_l - position_ids_r
+
+ positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
+ positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility
+
+ if self.position_embedding_type == "relative_key":
+ relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
+ attention_scores = attention_scores + relative_position_scores
+ elif self.position_embedding_type == "relative_key_query":
+ relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
+ relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
+ attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
+
+ attention_scores = attention_scores / math.sqrt(self.attention_head_size)
+ if attention_mask is not None:
+ # Apply the attention mask (precomputed for all layers in the ClapTextModel forward() function)
+ attention_scores = attention_scores + attention_mask
+
+ # Normalize the attention scores to probabilities.
+ attention_probs = nn.functional.softmax(attention_scores, dim=-1)
+
+ # This is actually dropping out entire tokens to attend to, which might
+ # seem a bit unusual, but is taken from the original Transformer paper.
+ attention_probs = self.dropout(attention_probs)
+
+ # Mask heads if we want to
+ if head_mask is not None:
+ attention_probs = attention_probs * head_mask
+
+ context_layer = torch.matmul(attention_probs, value_layer)
+
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
+ new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
+ context_layer = context_layer.view(new_context_layer_shape)
+
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
+
+ if self.is_decoder:
+ outputs = outputs + (past_key_value,)
+ return outputs
+
+
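`transpose_for_scores` is where the head dimension appears: it splits the last axis of a `(batch, seq, hidden)` tensor into `(num_heads, head_size)` and moves heads ahead of the sequence, so the matmul in `forward` produces one `seq x seq` score map per head. A shape walk-through with toy sizes (assumed, not the model's):

```python
import torch

batch, seq_len, hidden = 2, 5, 12
num_heads, head_size = 3, 4  # num_heads * head_size == hidden

hidden_states = torch.randn(batch, seq_len, hidden)

def transpose_for_scores(x: torch.Tensor) -> torch.Tensor:
    # (batch, seq, hidden) -> (batch, heads, seq, head_size)
    return x.view(batch, seq_len, num_heads, head_size).permute(0, 2, 1, 3)

q = transpose_for_scores(hidden_states)
k = transpose_for_scores(hidden_states)
v = transpose_for_scores(hidden_states)

scores = torch.matmul(q, k.transpose(-1, -2)) / head_size ** 0.5
print(scores.shape)  # torch.Size([2, 3, 5, 5]): one seq x seq map per head

context = torch.matmul(scores.softmax(dim=-1), v)
print(context.shape)  # torch.Size([2, 3, 5, 4]); permuting back and merging heads restores (2, 5, 12)
```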
+# Copied from transformers.models.bert.modeling_bert.BertSelfOutput
+class ClapTextSelfOutput(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
+ return hidden_states
+
+
+# Copied from transformers.models.bert.modeling_bert.BertAttention with Bert->ClapText
+class ClapTextAttention(nn.Module):
+ def __init__(self, config, position_embedding_type=None):
+ super().__init__()
+ self.self = ClapTextSelfAttention(config, position_embedding_type=position_embedding_type)
+ self.output = ClapTextSelfOutput(config)
+ self.pruned_heads = set()
+
+ def prune_heads(self, heads):
+ if len(heads) == 0:
+ return
+ heads, index = find_pruneable_heads_and_indices(
+ heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
+ )
+
+ # Prune linear layers
+ self.self.query = prune_linear_layer(self.self.query, index)
+ self.self.key = prune_linear_layer(self.self.key, index)
+ self.self.value = prune_linear_layer(self.self.value, index)
+ self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
+
+ # Update hyper params and store pruned heads
+ self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
+ self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
+ self.pruned_heads = self.pruned_heads.union(heads)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ output_attentions: Optional[bool] = False,
+ ) -> Tuple[torch.Tensor]:
+ self_outputs = self.self(
+ hidden_states,
+ attention_mask,
+ head_mask,
+ encoder_hidden_states,
+ encoder_attention_mask,
+ past_key_value,
+ output_attentions,
+ )
+ attention_output = self.output(self_outputs[0], hidden_states)
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
+ return outputs
+
+
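`prune_heads` delegates to the library helpers `find_pruneable_heads_and_indices` and `prune_linear_layer`; conceptually, dropping head `h` removes its `head_size`-wide slice of output features from the query/key/value projections (and the matching input features of the output projection). A rough sketch of that slicing for a single linear layer, illustrative only and not the library implementation:

```python
import torch
from torch import nn

num_heads, head_size, hidden = 4, 8, 32  # toy sizes
query = nn.Linear(hidden, num_heads * head_size)

h = 2  # hypothetical head to prune
keep = torch.cat(
    [torch.arange(i * head_size, (i + 1) * head_size) for i in range(num_heads) if i != h]
)

pruned_query = nn.Linear(hidden, (num_heads - 1) * head_size)
pruned_query.weight.data = query.weight.data[keep].clone()
pruned_query.bias.data = query.bias.data[keep].clone()
print(pruned_query.weight.shape)  # torch.Size([24, 32])
```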
+# Copied from transformers.models.bert.modeling_bert.BertIntermediate
+class ClapTextIntermediate(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
+ if isinstance(config.hidden_act, str):
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
+ else:
+ self.intermediate_act_fn = config.hidden_act
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.intermediate_act_fn(hidden_states)
+ return hidden_states
+
+
+# Copied from transformers.models.bert.modeling_bert.BertOutput
+class ClapTextOutput(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
+ return hidden_states
+
+
+# Copied from transformers.models.bert.modeling_bert.BertLayer with Bert->ClapText
+class ClapTextLayer(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.chunk_size_feed_forward = config.chunk_size_feed_forward
+ self.seq_len_dim = 1
+ self.attention = ClapTextAttention(config)
+ self.is_decoder = config.is_decoder
+ self.add_cross_attention = config.add_cross_attention
+ if self.add_cross_attention:
+ if not self.is_decoder:
+ raise ValueError(f"{self} should be used as a decoder model if cross attention is added")
+ self.crossattention = ClapTextAttention(config, position_embedding_type="absolute")
+ self.intermediate = ClapTextIntermediate(config)
+ self.output = ClapTextOutput(config)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ output_attentions: Optional[bool] = False,
+ ) -> Tuple[torch.Tensor]:
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
+ self_attention_outputs = self.attention(
+ hidden_states,
+ attention_mask,
+ head_mask,
+ output_attentions=output_attentions,
+ past_key_value=self_attn_past_key_value,
+ )
+ attention_output = self_attention_outputs[0]
+
+ # if decoder, the last output is tuple of self-attn cache
+ if self.is_decoder:
+ outputs = self_attention_outputs[1:-1]
+ present_key_value = self_attention_outputs[-1]
+ else:
+ outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
+
+ cross_attn_present_key_value = None
+ if self.is_decoder and encoder_hidden_states is not None:
+ if not hasattr(self, "crossattention"):
+ raise ValueError(
+ f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers"
+ " by setting `config.add_cross_attention=True`"
+ )
+
+ # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple
+ cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
+ cross_attention_outputs = self.crossattention(
+ attention_output,
+ attention_mask,
+ head_mask,
+ encoder_hidden_states,
+ encoder_attention_mask,
+ cross_attn_past_key_value,
+ output_attentions,
+ )
+ attention_output = cross_attention_outputs[0]
+ outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights
+
+ # add cross-attn cache to positions 3,4 of present_key_value tuple
+ cross_attn_present_key_value = cross_attention_outputs[-1]
+ present_key_value = present_key_value + cross_attn_present_key_value
+
+ layer_output = apply_chunking_to_forward(
+ self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
+ )
+ outputs = (layer_output,) + outputs
+
+ # if decoder, return the attn key/values as the last output
+ if self.is_decoder:
+ outputs = outputs + (present_key_value,)
+
+ return outputs
+
+ def feed_forward_chunk(self, attention_output):
+ intermediate_output = self.intermediate(attention_output)
+ layer_output = self.output(intermediate_output, attention_output)
+ return layer_output
+
+
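`apply_chunking_to_forward` in `ClapTextLayer.forward` trades peak memory for extra passes: it splits the sequence dimension into chunks of `chunk_size_feed_forward`, applies `feed_forward_chunk` to each, and concatenates the results. A simplified sketch of that behavior (the real utility also validates shapes, argument counts, and divisibility):

```python
import torch

def chunked_feed_forward(forward_fn, chunk_size, seq_len_dim, attention_output):
    if chunk_size == 0:
        return forward_fn(attention_output)
    num_chunks = attention_output.shape[seq_len_dim] // chunk_size
    chunks = attention_output.chunk(num_chunks, dim=seq_len_dim)
    return torch.cat([forward_fn(chunk) for chunk in chunks], dim=seq_len_dim)

ff = torch.nn.Linear(16, 16)
x = torch.randn(2, 8, 16)
out = chunked_feed_forward(ff, chunk_size=4, seq_len_dim=1, attention_output=x)
print(out.shape)                              # torch.Size([2, 8, 16])
print(torch.allclose(out, ff(x), atol=1e-6))  # True: chunking only trades memory for time
```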
+# Copied from transformers.models.bert.modeling_bert.BertEncoder with Bert->ClapText
+class ClapTextEncoder(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.config = config
+ self.layer = nn.ModuleList([ClapTextLayer(config) for _ in range(config.num_hidden_layers)])
+ self.gradient_checkpointing = False
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = False,
+ output_hidden_states: Optional[bool] = False,
+ return_dict: Optional[bool] = True,
+ ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]:
+ all_hidden_states = () if output_hidden_states else None
+ all_self_attentions = () if output_attentions else None
+ all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
+
+ if self.gradient_checkpointing and self.training:
+ if use_cache:
+ logger.warning_once(
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
+ )
+ use_cache = False
+
+ next_decoder_cache = () if use_cache else None
+ for i, layer_module in enumerate(self.layer):
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ layer_head_mask = head_mask[i] if head_mask is not None else None
+ past_key_value = past_key_values[i] if past_key_values is not None else None
+
+ if self.gradient_checkpointing and self.training:
+ layer_outputs = self._gradient_checkpointing_func(
+ layer_module.__call__,
+ hidden_states,
+ attention_mask,
+ layer_head_mask,
+ encoder_hidden_states,
+ encoder_attention_mask,
+ past_key_value,
+ output_attentions,
+ )
+ else:
+ layer_outputs = layer_module(
+ hidden_states,
+ attention_mask,
+ layer_head_mask,
+ encoder_hidden_states,
+ encoder_attention_mask,
+ past_key_value,
+ output_attentions,
+ )
+
+ hidden_states = layer_outputs[0]
+ if use_cache:
+ next_decoder_cache += (layer_outputs[-1],)
+ if output_attentions:
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
+ if self.config.add_cross_attention:
+ all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
+
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ if not return_dict:
+ return tuple(
+ v
+ for v in [
+ hidden_states,
+ next_decoder_cache,
+ all_hidden_states,
+ all_self_attentions,
+ all_cross_attentions,
+ ]
+ if v is not None
+ )
+ return BaseModelOutputWithPastAndCrossAttentions(
+ last_hidden_state=hidden_states,
+ past_key_values=next_decoder_cache,
+ hidden_states=all_hidden_states,
+ attentions=all_self_attentions,
+ cross_attentions=all_cross_attentions,
+ )
+
+
+# Copied from transformers.models.bert.modeling_bert.BertPooler
+class ClapTextPooler(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
+ self.activation = nn.Tanh()
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ # We "pool" the model by simply taking the hidden state corresponding
+ # to the first token.
+ first_token_tensor = hidden_states[:, 0]
+ pooled_output = self.dense(first_token_tensor)
+ pooled_output = self.activation(pooled_output)
+ return pooled_output
+
+
+class ClapPreTrainedModel(PreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = ClapConfig
+ base_model_prefix = "clap"
+ supports_gradient_checkpointing = False
+
+ def _init_weights(self, module):
+ """Initialize the weights"""
+ factor = self.config.initializer_factor
+
+ if isinstance(module, ClapTextEmbeddings):
+ module.position_embeddings.weight.data.normal_(mean=0.0, std=factor * 0.02)
+ module.token_type_embeddings.weight.data.normal_(mean=0.0, std=factor * 0.02)
+ elif isinstance(module, ClapModel):
+ nn.init.normal_(module.logit_scale_a, std=factor * 0.02)
+ nn.init.normal_(module.logit_scale_t, std=factor * 0.02)
+ elif isinstance(module, nn.Embedding):
+ module.weight.data.normal_(mean=0.0, std=factor * 0.02)
+
+ elif isinstance(module, nn.LayerNorm):
+ module.bias.data.zero_()
+ module.weight.data.fill_(1.0)
+ elif isinstance(module, (nn.Conv2d, nn.Linear)):
+ in_proj_std = (self.config.hidden_size**-0.5) * ((2 * self.config.num_hidden_layers) ** -0.5) * factor
+ nn.init.normal_(module.weight, std=in_proj_std)
+ if module.bias is not None:
+ module.bias.data.zero_()
+
+
+class ClapAudioModel(ClapPreTrainedModel):
+ config_class = ClapAudioConfig
+ main_input_name = "input_features"
+
+ def __init__(self, config: ClapAudioConfig):
+ super().__init__(config)
+ self.audio_encoder = ClapAudioEncoder(config)
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self) -> nn.Module:
+ return self.audio_encoder.patch_embed.proj
+
+ @add_start_docstrings_to_model_forward(CLAP_AUDIO_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=ClapAudioConfig)
+ def forward(
+ self,
+ input_features: Optional[torch.FloatTensor] = None,
+ is_longer: Optional[torch.BoolTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, BaseModelOutputWithPooling]:
+ r"""
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> from datasets import load_dataset
+ >>> from transformers import AutoProcessor, ClapAudioModel
+
+ >>> dataset = load_dataset("hf-internal-testing/ashraq-esc50-1-dog-example")
+ >>> audio_sample = dataset["train"]["audio"][0]["array"]
+
+ >>> model = ClapAudioModel.from_pretrained("laion/clap-htsat-fused")
+ >>> processor = AutoProcessor.from_pretrained("laion/clap-htsat-fused")
+
+ >>> inputs = processor(audios=audio_sample, return_tensors="pt")
+
+ >>> outputs = model(**inputs)
+ >>> last_hidden_state = outputs.last_hidden_state
+ ```"""
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+
+ return self.audio_encoder(
+ input_features=input_features,
+ is_longer=is_longer,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+
+class ClapTextModel(ClapPreTrainedModel):
+ """
+
+ The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
+ cross-attention is added between the self-attention layers, following the architecture described in [Attention Is
+ All You Need](https://arxiv.org/abs/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
+ Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
+
+ To behave as a decoder the model needs to be initialized with the `is_decoder` argument of the configuration set
+ to `True`. To be used in a Seq2Seq model, the model needs to be initialized with both `is_decoder` and
+ `add_cross_attention` set to `True`; an `encoder_hidden_states` is then expected as an input to the forward pass.
+
+ """
+
+ config_class = ClapTextConfig
+
+ # Copied from transformers.models.bert.modeling_bert.BertModel.__init__ with Bert->ClapText
+ def __init__(self, config, add_pooling_layer=True):
+ super().__init__(config)
+ self.config = config
+
+ self.embeddings = ClapTextEmbeddings(config)
+ self.encoder = ClapTextEncoder(config)
+
+ self.pooler = ClapTextPooler(config) if add_pooling_layer else None
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.embeddings.word_embeddings
+
+ def set_input_embeddings(self, value):
+ self.embeddings.word_embeddings = value
+
+ # Copied from transformers.models.bert.modeling_bert.BertModel.forward
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ token_type_ids: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ encoder_hidden_states: Optional[torch.Tensor] = None,
+ encoder_attention_mask: Optional[torch.Tensor] = None,
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]:
+ r"""
+ encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
+ the model is configured as a decoder.
+ encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
+ the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+ past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
+
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+ use_cache (`bool`, *optional*):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
+ `past_key_values`).
+ """
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if self.config.is_decoder:
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+ else:
+ use_cache = False
+
+ if input_ids is not None and inputs_embeds is not None:
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
+ elif input_ids is not None:
+ self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
+ input_shape = input_ids.size()
+ elif inputs_embeds is not None:
+ input_shape = inputs_embeds.size()[:-1]
+ else:
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
+
+ batch_size, seq_length = input_shape
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
+
+ # past_key_values_length
+ past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
+
+ if attention_mask is None:
+ attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device)
+
+ if token_type_ids is None:
+ if hasattr(self.embeddings, "token_type_ids"):
+ buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]
+ buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length)
+ token_type_ids = buffered_token_type_ids_expanded
+ else:
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
+
+ # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
+ # ourselves in which case we just need to make it broadcastable to all heads.
+ extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape)
+
+ # If a 2D or 3D attention mask is provided for the cross-attention
+ # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
+ if self.config.is_decoder and encoder_hidden_states is not None:
+ encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
+ encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
+ if encoder_attention_mask is None:
+ encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
+ encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
+ else:
+ encoder_extended_attention_mask = None
+
+ # Prepare head mask if needed
+ # 1.0 in head_mask indicate we keep the head
+ # attention_probs has shape bsz x n_heads x N x N
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
+
+ embedding_output = self.embeddings(
+ input_ids=input_ids,
+ position_ids=position_ids,
+ token_type_ids=token_type_ids,
+ inputs_embeds=inputs_embeds,
+ past_key_values_length=past_key_values_length,
+ )
+ encoder_outputs = self.encoder(
+ embedding_output,
+ attention_mask=extended_attention_mask,
+ head_mask=head_mask,
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=encoder_extended_attention_mask,
+ past_key_values=past_key_values,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ sequence_output = encoder_outputs[0]
+ pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
+
+ if not return_dict:
+ return (sequence_output, pooled_output) + encoder_outputs[1:]
+
+ return BaseModelOutputWithPoolingAndCrossAttentions(
+ last_hidden_state=sequence_output,
+ pooler_output=pooled_output,
+ past_key_values=encoder_outputs.past_key_values,
+ hidden_states=encoder_outputs.hidden_states,
+ attentions=encoder_outputs.attentions,
+ cross_attentions=encoder_outputs.cross_attentions,
+ )
+
+
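`get_extended_attention_mask` in the forward pass above turns the 2D padding mask into an additive mask that broadcasts over heads and query positions and is simply added to the raw attention scores. A sketch of the usual convention it follows (0.0 where attention is allowed, a very large negative value where it is masked):

```python
import torch

attention_mask = torch.tensor([[1, 1, 1, 0, 0]])  # 1 = real token, 0 = padding
dtype = torch.float32

extended = attention_mask[:, None, None, :].to(dtype)        # (batch, 1, 1, seq)
extended = (1.0 - extended) * torch.finfo(dtype).min          # masked -> very negative

print(extended.shape)  # torch.Size([1, 1, 1, 5])
print(extended)        # masked positions vanish after the softmax over the key axis
```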
+@add_start_docstrings(CLAP_START_DOCSTRING)
+class ClapModel(ClapPreTrainedModel):
+ config_class = ClapConfig
+
+ def __init__(self, config: ClapConfig):
+ super().__init__(config)
+
+ if not isinstance(config.text_config, ClapTextConfig):
+ raise ValueError(
+ "config.text_config is expected to be of type ClapTextConfig but is of type"
+ f" {type(config.text_config)}."
+ )
+
+ if not isinstance(config.audio_config, ClapAudioConfig):
+ raise ValueError(
+ "config.audio_config is expected to be of type ClapAudioConfig but is of type"
+ f" {type(config.audio_config)}."
+ )
+
+ text_config = config.text_config
+ audio_config = config.audio_config
+
+ self.logit_scale_a = nn.Parameter(torch.tensor(math.log(config.logit_scale_init_value)))
+ self.logit_scale_t = nn.Parameter(torch.tensor(math.log(config.logit_scale_init_value)))
+
+ self.projection_dim = config.projection_dim
+
+ self.text_model = ClapTextModel(text_config)
+ self.text_projection = ClapProjectionLayer(text_config)
+
+ self.audio_model = ClapAudioModel(audio_config)
+ self.audio_projection = ClapProjectionLayer(audio_config)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(CLAP_TEXT_INPUTS_DOCSTRING)
+ def get_text_features(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> torch.FloatTensor:
+ r"""
+ Returns:
+ text_features (`torch.FloatTensor` of shape `(batch_size, output_dim)`): The text embeddings obtained by
+ applying the projection layer to the pooled output of [`ClapTextModel`].
+
+ Examples:
+
+ ```python
+ >>> from transformers import AutoTokenizer, ClapModel
+
+ >>> model = ClapModel.from_pretrained("laion/clap-htsat-unfused")
+ >>> tokenizer = AutoTokenizer.from_pretrained("laion/clap-htsat-unfused")
+
+ >>> inputs = tokenizer(["the sound of a cat", "the sound of a dog"], padding=True, return_tensors="pt")
+ >>> text_features = model.get_text_features(**inputs)
+ ```"""
+ # Use CLAP model's config for some fields (if specified) instead of those of audio & text components.
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ text_outputs = self.text_model(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ pooled_output = text_outputs[1] if not return_dict else text_outputs.pooler_output
+ text_features = self.text_projection(pooled_output)
+ text_features = F.normalize(text_features, dim=-1)
+
+ return text_features
+
+ @add_start_docstrings_to_model_forward(CLAP_AUDIO_INPUTS_DOCSTRING)
+ def get_audio_features(
+ self,
+ input_features: Optional[torch.Tensor] = None,
+ is_longer: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> torch.FloatTensor:
+ r"""
+ Returns:
+ audio_features (`torch.FloatTensor` of shape `(batch_size, output_dim)`): The audio embeddings obtained by
+ applying the projection layer to the pooled output of [`ClapAudioModel`].
+
+ Examples:
+
+ ```python
+ >>> from transformers import AutoFeatureExtractor, ClapModel
+ >>> import torch
+
+ >>> model = ClapModel.from_pretrained("laion/clap-htsat-unfused")
+ >>> feature_extractor = AutoFeatureExtractor.from_pretrained("laion/clap-htsat-unfused")
+ >>> random_audio = torch.rand((16_000))
+ >>> inputs = feature_extractor(random_audio, return_tensors="pt")
+ >>> audio_features = model.get_audio_features(**inputs)
+ ```"""
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ audio_outputs = self.audio_model(
+ input_features=input_features,
+ is_longer=is_longer,
+ return_dict=return_dict,
+ )
+
+ pooled_output = audio_outputs[1] if not return_dict else audio_outputs.pooler_output
+
+ audio_features = self.audio_projection(pooled_output)
+ audio_features = F.normalize(audio_features, dim=-1)
+
+ return audio_features
+
+ @add_start_docstrings_to_model_forward(CLAP_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=ClapOutput, config_class=ClapConfig)
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ input_features: Optional[torch.FloatTensor] = None,
+ is_longer: Optional[torch.BoolTensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ return_loss: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, ClapOutput]:
+ r"""
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> from datasets import load_dataset
+ >>> from transformers import AutoProcessor, ClapModel
+
+ >>> dataset = load_dataset("hf-internal-testing/ashraq-esc50-1-dog-example")
+ >>> audio_sample = dataset["train"]["audio"][0]["array"]
+
+ >>> model = ClapModel.from_pretrained("laion/clap-htsat-unfused")
+ >>> processor = AutoProcessor.from_pretrained("laion/clap-htsat-unfused")
+
+ >>> input_text = ["Sound of a dog", "Sound of vacuum cleaner"]
+
+ >>> inputs = processor(text=input_text, audios=audio_sample, return_tensors="pt", padding=True)
+
+ >>> outputs = model(**inputs)
+ >>> logits_per_audio = outputs.logits_per_audio # this is the audio-text similarity score
+ >>> probs = logits_per_audio.softmax(dim=-1) # we can take the softmax to get the label probabilities
+ ```"""
+ # Use CLAP model's config for some fields (if specified) instead of those of audio & text components.
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ audio_outputs = self.audio_model(
+ input_features=input_features,
+ is_longer=is_longer,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ text_outputs = self.text_model(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ audio_embeds = audio_outputs[1] if not return_dict else audio_outputs.pooler_output
+ audio_embeds = self.audio_projection(audio_embeds)
+
+ text_embeds = text_outputs[1] if not return_dict else text_outputs.pooler_output
+ text_embeds = self.text_projection(text_embeds)
+
+ # normalized features
+ audio_embeds = audio_embeds / audio_embeds.norm(p=2, dim=-1, keepdim=True)
+ text_embeds = text_embeds / text_embeds.norm(p=2, dim=-1, keepdim=True)
+
+ # cosine similarity as logits
+ logit_scale_text = self.logit_scale_t.exp()
+ logit_scale_audio = self.logit_scale_a.exp()
+ logits_per_text = torch.matmul(text_embeds, audio_embeds.t()) * logit_scale_text
+ logits_per_audio = torch.matmul(audio_embeds, text_embeds.t()) * logit_scale_audio
+
+ loss = None
+ if return_loss:
+ caption_loss = contrastive_loss(logits_per_text)
+ audio_loss = contrastive_loss(logits_per_audio.t())
+ loss = (caption_loss + audio_loss) / 2.0
+
+ if not return_dict:
+ output = (logits_per_audio, logits_per_text, text_embeds, audio_embeds, text_outputs, audio_outputs)
+ return ((loss,) + output) if loss is not None else output
+
+ return ClapOutput(
+ loss=loss,
+ logits_per_audio=logits_per_audio,
+ logits_per_text=logits_per_text,
+ text_embeds=text_embeds,
+ audio_embeds=audio_embeds,
+ text_model_output=text_outputs,
+ audio_model_output=audio_outputs,
+ )
+
+
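The loss branch above is the standard CLIP-style symmetric objective: row `i` of the text-to-audio logits should pick column `i`, and vice versa. A sketch under the assumption that `contrastive_loss` is cross-entropy against the diagonal (the helper itself lives elsewhere in this file, so the definition below is a stand-in):

```python
import torch
import torch.nn.functional as F

def contrastive_loss_sketch(logits: torch.Tensor) -> torch.Tensor:
    # Each row's positive is the entry on the diagonal.
    return F.cross_entropy(logits, torch.arange(len(logits), device=logits.device))

batch = 4
text_embeds = F.normalize(torch.randn(batch, 512), dim=-1)
audio_embeds = F.normalize(torch.randn(batch, 512), dim=-1)
logit_scale = torch.tensor(100.0)  # illustrative exp(logit_scale) value

logits_per_text = text_embeds @ audio_embeds.t() * logit_scale
caption_loss = contrastive_loss_sketch(logits_per_text)
audio_loss = contrastive_loss_sketch(logits_per_text.t())
print((caption_loss + audio_loss) / 2.0)
```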
+@add_start_docstrings(
+ """
+ CLAP Text Model with a projection layer on top (a linear layer on top of the pooled output).
+ """,
+ CLAP_START_DOCSTRING,
+)
+class ClapTextModelWithProjection(ClapPreTrainedModel):
+ config_class = ClapTextConfig
+
+ def __init__(self, config: ClapTextConfig):
+ super().__init__(config)
+ self.text_model = ClapTextModel(config)
+ self.text_projection = ClapProjectionLayer(config)
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self) -> nn.Module:
+ return self.text_model.embeddings.word_embeddings
+
+ def set_input_embeddings(self, value):
+ self.text_model.embeddings.word_embeddings = value
+
+ @add_start_docstrings_to_model_forward(CLAP_TEXT_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=ClapTextModelOutput, config_class=ClapTextConfig)
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, ClapTextModelOutput]:
+ r"""
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> from transformers import AutoTokenizer, ClapTextModelWithProjection
+
+ >>> model = ClapTextModelWithProjection.from_pretrained("laion/clap-htsat-unfused")
+ >>> tokenizer = AutoTokenizer.from_pretrained("laion/clap-htsat-unfused")
+
+ >>> inputs = tokenizer(["a sound of a cat", "a sound of a dog"], padding=True, return_tensors="pt")
+
+ >>> outputs = model(**inputs)
+ >>> text_embeds = outputs.text_embeds
+ ```"""
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ text_outputs = self.text_model(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ pooled_output = text_outputs[1] if not return_dict else text_outputs.pooler_output
+
+ text_embeds = self.text_projection(pooled_output)
+
+ if not return_dict:
+ outputs = (text_embeds, text_outputs[0]) + text_outputs[2:]
+ return tuple(output for output in outputs if output is not None)
+
+ return ClapTextModelOutput(
+ text_embeds=text_embeds,
+ last_hidden_state=text_outputs.last_hidden_state,
+ hidden_states=text_outputs.hidden_states,
+ attentions=text_outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+ CLAP Audio Model with a projection layer on top (a linear layer on top of the pooled output).
+ """,
+ CLAP_START_DOCSTRING,
+)
+class ClapAudioModelWithProjection(ClapPreTrainedModel):
+ config_class = ClapAudioConfig
+ main_input_name = "input_features"
+
+ def __init__(self, config: ClapAudioConfig):
+ super().__init__(config)
+ self.audio_model = ClapAudioModel(config)
+ self.audio_projection = ClapProjectionLayer(config)
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self) -> nn.Module:
+ return self.audio_model.audio_encoder.patch_embed.proj
+
+ @add_start_docstrings_to_model_forward(CLAP_AUDIO_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=ClapAudioModelOutput, config_class=ClapAudioConfig)
+ def forward(
+ self,
+ input_features: Optional[torch.FloatTensor] = None,
+ is_longer: Optional[torch.BoolTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, ClapAudioModelOutput]:
+ r"""
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> from datasets import load_dataset
+ >>> from transformers import ClapAudioModelWithProjection, ClapProcessor
+
+ >>> model = ClapAudioModelWithProjection.from_pretrained("laion/clap-htsat-fused")
+ >>> processor = ClapProcessor.from_pretrained("laion/clap-htsat-fused")
+
+ >>> dataset = load_dataset("hf-internal-testing/ashraq-esc50-1-dog-example")
+ >>> audio_sample = dataset["train"]["audio"][0]["array"]
+
+ >>> inputs = processor(audios=audio_sample, return_tensors="pt")
+ >>> outputs = model(**inputs)
+ >>> audio_embeds = outputs.audio_embeds
+ ```"""
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+
+ audio_outputs = self.audio_model(
+ input_features=input_features,
+ is_longer=is_longer,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ pooled_output = audio_outputs[1] if not return_dict else audio_outputs.pooler_output
+
+ audio_embeds = self.audio_projection(pooled_output)
+
+ if not return_dict:
+ outputs = (audio_embeds, audio_outputs[0]) + audio_outputs[2:]
+ return tuple(output for output in outputs if output is not None)
+
+ return ClapAudioModelOutput(
+ audio_embeds=audio_embeds,
+ last_hidden_state=audio_outputs.last_hidden_state,
+ attentions=audio_outputs.attentions,
+ hidden_states=audio_outputs.hidden_states,
+ )
diff --git a/venv/lib/python3.10/site-packages/transformers/models/clap/processing_clap.py b/venv/lib/python3.10/site-packages/transformers/models/clap/processing_clap.py
new file mode 100644
index 0000000000000000000000000000000000000000..87799899945fa669d3980e8cc6c15192cf7a2ba5
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/clap/processing_clap.py
@@ -0,0 +1,117 @@
+# coding=utf-8
+# Copyright 2023 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Audio/Text processor class for CLAP
+"""
+
+from ...processing_utils import ProcessorMixin
+from ...tokenization_utils_base import BatchEncoding
+
+
+class ClapProcessor(ProcessorMixin):
+ r"""
+ Constructs a CLAP processor which wraps a CLAP feature extractor and a RoBERTa tokenizer into a single processor.
+
+ [`ClapProcessor`] offers all the functionalities of [`ClapFeatureExtractor`] and [`RobertaTokenizerFast`]. See the
+ [`~ClapProcessor.__call__`] and [`~ClapProcessor.decode`] for more information.
+
+ Args:
+ feature_extractor ([`ClapFeatureExtractor`]):
+ The audio processor is a required input.
+ tokenizer ([`RobertaTokenizerFast`]):
+ The tokenizer is a required input.
+ """
+
+ feature_extractor_class = "ClapFeatureExtractor"
+ tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")
+
+ def __init__(self, feature_extractor, tokenizer):
+ super().__init__(feature_extractor, tokenizer)
+
+ def __call__(self, text=None, audios=None, return_tensors=None, **kwargs):
+ """
+ Main method to prepare one or several text sequence(s) and audio(s) for the model. This method forwards the `text`
+ and `kwargs` arguments to RobertaTokenizerFast's [`~RobertaTokenizerFast.__call__`] if `text` is not `None` to
+ encode the text. To prepare the audio(s), this method forwards the `audios` and `kwargs` arguments to
+ ClapFeatureExtractor's [`~ClapFeatureExtractor.__call__`] if `audios` is not `None`. Please refer to the
+ docstring of the above two methods for more information.
+
+ Args:
+ text (`str`, `List[str]`, `List[List[str]]`):
+ The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
+ (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
+ `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
+ audios (`np.ndarray`, `torch.Tensor`, `List[np.ndarray]`, `List[torch.Tensor]`):
+ The audio or batch of audios to be prepared. Each audio can be a NumPy array or a PyTorch tensor. In case
+ of a NumPy array/PyTorch tensor, each audio should be of shape (C, T), where C is the number of channels
+ and T is the sample length of the audio.
+
+ return_tensors (`str` or [`~utils.TensorType`], *optional*):
+ If set, will return tensors of a particular framework. Acceptable values are:
+
+ - `'tf'`: Return TensorFlow `tf.constant` objects.
+ - `'pt'`: Return PyTorch `torch.Tensor` objects.
+ - `'np'`: Return NumPy `np.ndarray` objects.
+ - `'jax'`: Return JAX `jnp.ndarray` objects.
+
+ Returns:
+ [`BatchEncoding`]: A [`BatchEncoding`] with the following fields:
+
+ - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
+ - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
+ `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
+ `None`).
+ - **audio_features** -- Audio features to be fed to a model. Returned when `audios` is not `None`.
+ """
+ sampling_rate = kwargs.pop("sampling_rate", None)
+
+ if text is None and audios is None:
+ raise ValueError("You have to specify either text or audios. Both cannot be none.")
+
+ if text is not None:
+ encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
+
+ if audios is not None:
+ audio_features = self.feature_extractor(
+ audios, sampling_rate=sampling_rate, return_tensors=return_tensors, **kwargs
+ )
+
+ if text is not None and audios is not None:
+ encoding["input_features"] = audio_features.input_features
+ return encoding
+ elif text is not None:
+ return encoding
+ else:
+ return BatchEncoding(data=dict(**audio_features), tensor_type=return_tensors)
+
+ def batch_decode(self, *args, **kwargs):
+ """
+ This method forwards all its arguments to RobertaTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please
+ refer to the docstring of this method for more information.
+ """
+ return self.tokenizer.batch_decode(*args, **kwargs)
+
+ def decode(self, *args, **kwargs):
+ """
+ This method forwards all its arguments to RobertaTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer
+ to the docstring of this method for more information.
+ """
+ return self.tokenizer.decode(*args, **kwargs)
+
+ @property
+ def model_input_names(self):
+ tokenizer_input_names = self.tokenizer.model_input_names
+ feature_extractor_input_names = self.feature_extractor.model_input_names
+ return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names))
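For reference, a hedged end-to-end usage sketch of `ClapProcessor`: the checkpoint name is taken from the doctests earlier in this diff, while the 48 kHz sampling rate and the exact returned keys are assumptions about the feature extractor's defaults, so treat them as placeholders.

```python
import numpy as np
from transformers import ClapProcessor

processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")

texts = ["the sound of a cat", "the sound of a dog"]
waveform = np.random.rand(48_000).astype(np.float32)  # one second of fake mono audio (assumed 48 kHz)

inputs = processor(
    text=texts, audios=waveform, sampling_rate=48_000, return_tensors="pt", padding=True
)
print(sorted(inputs.keys()))  # typically ['attention_mask', 'input_features', 'input_ids']
```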
diff --git a/venv/lib/python3.10/site-packages/transformers/models/deprecated/open_llama/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/deprecated/open_llama/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..bf8afcf228964aaf0f64265dd25a420df948f83c
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/deprecated/open_llama/__pycache__/__init__.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/deprecated/open_llama/__pycache__/configuration_open_llama.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/deprecated/open_llama/__pycache__/configuration_open_llama.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..53eb7a2fafab9f72f56e8e72902d3970db9d60c7
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/deprecated/open_llama/__pycache__/configuration_open_llama.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/deprecated/open_llama/__pycache__/modeling_open_llama.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/deprecated/open_llama/__pycache__/modeling_open_llama.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1449655e73716fa5ddc8c745eeb11e0258f734b2
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/deprecated/open_llama/__pycache__/modeling_open_llama.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/deprecated/open_llama/configuration_open_llama.py b/venv/lib/python3.10/site-packages/transformers/models/deprecated/open_llama/configuration_open_llama.py
new file mode 100644
index 0000000000000000000000000000000000000000..0111e031251a2c7108419827f7b8c35eab0edcc3
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/deprecated/open_llama/configuration_open_llama.py
@@ -0,0 +1,170 @@
+# coding=utf-8
+# Copyright 2023 EleutherAI and the HuggingFace Inc. team. All rights reserved.
+#
+# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
+# and OPT implementations in this library. It has been modified from its
+# original forms to accommodate minor architectural differences compared
+# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" Open-Llama model configuration"""
+
+from ....configuration_utils import PretrainedConfig
+from ....utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+
+from .._archive_maps import OPEN_LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
+
+
+class OpenLlamaConfig(PretrainedConfig):
+ r"""
+    This is the configuration class to store the configuration of an [`OpenLlamaModel`]. It is used to instantiate an
+ Open-Llama model according to the specified arguments, defining the model architecture. Instantiating a
+ configuration with the defaults will yield a similar configuration to that of the
+ [s-JoL/Open-Llama-V1](https://huggingface.co/s-JoL/Open-Llama-V1).
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+
+ Args:
+        vocab_size (`int`, *optional*, defaults to 100000):
+ Vocabulary size of the Open-Llama model. Defines the number of different tokens that can be represented by
+ the `inputs_ids` passed when calling [`OpenLlamaModel`]
+ hidden_size (`int`, *optional*, defaults to 4096):
+ Dimension of the hidden representations.
+ intermediate_size (`int`, *optional*, defaults to 11008):
+ Dimension of the MLP representations.
+ num_hidden_layers (`int`, *optional*, defaults to 32):
+ Number of hidden layers in the Transformer encoder.
+ num_attention_heads (`int`, *optional*, defaults to 32):
+ Number of attention heads for each attention layer in the Transformer encoder.
+ hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
+ The non-linear activation function (function or string) in the decoder.
+ max_position_embeddings (`int`, *optional*, defaults to 2048):
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
+ just in case (e.g., 512 or 1024 or 2048).
+ initializer_range (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+        rms_norm_eps (`float`, *optional*, defaults to 1e-6):
+ The epsilon used by the rms normalization layers.
+ use_cache (`bool`, *optional*, defaults to `True`):
+ Whether or not the model should return the last key/values attentions (not used by all models). Only
+ relevant if `config.is_decoder=True`.
+        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
+            Whether to tie the input and output word embeddings.
+ rope_theta (`float`, *optional*, defaults to 10000.0):
+ The base period of the RoPE embeddings.
+ rope_scaling (`Dict`, *optional*):
+ Dictionary containing the scaling configuration for the RoPE embeddings. Currently supports two scaling
+ strategies: linear and dynamic. Their scaling factor must be a float greater than 1. The expected format is
+ `{"type": strategy name, "factor": scaling factor}`. When using this flag, don't update
+ `max_position_embeddings` to the expected new maximum. See the following thread for more information on how
+ these scaling strategies behave:
+ https://www.reddit.com/r/LocalLLaMA/comments/14mrgpr/dynamically_scaled_rope_further_increases/. This is an
+ experimental feature, subject to breaking API changes in future versions.
+
+ Example:
+
+ ```python
+ >>> from transformers import OpenLlamaModel, OpenLlamaConfig
+
+    >>> # Initializing an Open-Llama open_llama-7b style configuration
+ >>> configuration = OpenLlamaConfig()
+
+ >>> # Initializing a model from the open_llama-7b style configuration
+ >>> model = OpenLlamaModel(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```"""
+
+ model_type = "open-llama"
+
+ def __init__(
+ self,
+ vocab_size=100000,
+ hidden_size=4096,
+ intermediate_size=11008,
+ num_hidden_layers=32,
+ num_attention_heads=32,
+ hidden_act="silu",
+ max_position_embeddings=2048,
+ initializer_range=0.02,
+ rms_norm_eps=1e-6,
+ use_cache=True,
+ pad_token_id=0,
+ bos_token_id=1,
+ eos_token_id=2,
+ tie_word_embeddings=False,
+ use_memory_efficient_attention=True,
+ hidden_dropout_prob=0.1,
+ attention_dropout_prob=0.1,
+ use_stable_embedding=True,
+ shared_input_output_embedding=True,
+ rope_theta=10000.0,
+ rope_scaling=None,
+ **kwargs,
+ ):
+ self.vocab_size = vocab_size
+ self.max_position_embeddings = max_position_embeddings
+ self.hidden_size = hidden_size
+ self.intermediate_size = intermediate_size
+ self.num_hidden_layers = num_hidden_layers
+ self.num_attention_heads = num_attention_heads
+ self.hidden_act = hidden_act
+ self.initializer_range = initializer_range
+ self.rms_norm_eps = rms_norm_eps
+ self.use_cache = use_cache
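+        # note: the misspelled "use_memorry_efficient_attention" key is read from kwargs below,
+        # presumably so configs saved with that older spelling keep working (backward compatibility)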
+ self.use_memory_efficient_attention = kwargs.pop(
+ "use_memorry_efficient_attention", use_memory_efficient_attention
+ )
+ self.hidden_dropout_prob = hidden_dropout_prob
+ self.attention_dropout_prob = attention_dropout_prob
+ self.use_stable_embedding = use_stable_embedding
+ self.shared_input_output_embedding = shared_input_output_embedding
+ self.rope_theta = rope_theta
+ self.rope_scaling = rope_scaling
+ self._rope_scaling_validation()
+
+ super().__init__(
+ pad_token_id=pad_token_id,
+ bos_token_id=bos_token_id,
+ eos_token_id=eos_token_id,
+ tie_word_embeddings=tie_word_embeddings,
+ **kwargs,
+ )
+
+ # Copied from transformers.models.llama.configuration_llama.LlamaConfig._rope_scaling_validation
+ def _rope_scaling_validation(self):
+ """
+ Validate the `rope_scaling` configuration.
+ """
+ if self.rope_scaling is None:
+ return
+
+ if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
+ raise ValueError(
+ "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, " f"got {self.rope_scaling}"
+ )
+ rope_scaling_type = self.rope_scaling.get("type", None)
+ rope_scaling_factor = self.rope_scaling.get("factor", None)
+ if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
+ raise ValueError(
+ f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
+ )
+ if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
+ raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
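+
+
+# Illustrative note (not part of the upstream module): a `rope_scaling` value accepted by
+# `_rope_scaling_validation` above is a two-key dict such as {"type": "linear", "factor": 2.0}, e.g.
+#   config = OpenLlamaConfig(rope_scaling={"type": "dynamic", "factor": 4.0})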
diff --git a/venv/lib/python3.10/site-packages/transformers/models/deprecated/retribert/__init__.py b/venv/lib/python3.10/site-packages/transformers/models/deprecated/retribert/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..dba5e14594e16c19fc1a269a92e968fec35afc26
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/deprecated/retribert/__init__.py
@@ -0,0 +1,73 @@
+# Copyright 2020 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import TYPE_CHECKING
+
+from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
+
+
+_import_structure = {
+ "configuration_retribert": ["RETRIBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RetriBertConfig"],
+ "tokenization_retribert": ["RetriBertTokenizer"],
+}
+
+try:
+ if not is_tokenizers_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["tokenization_retribert_fast"] = ["RetriBertTokenizerFast"]
+
+try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_retribert"] = [
+ "RETRIBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "RetriBertModel",
+ "RetriBertPreTrainedModel",
+ ]
+
+
+if TYPE_CHECKING:
+ from .configuration_retribert import RETRIBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RetriBertConfig
+ from .tokenization_retribert import RetriBertTokenizer
+
+ try:
+ if not is_tokenizers_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .tokenization_retribert_fast import RetriBertTokenizerFast
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_retribert import (
+ RETRIBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
+ RetriBertModel,
+ RetriBertPreTrainedModel,
+ )
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
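+
+# Illustrative note (not part of the upstream module): because of the `_LazyModule` replacement above,
+# importing this package is cheap; the torch- and tokenizers-backed submodules are only imported on
+# first attribute access (e.g. when `RetriBertModel` or `RetriBertTokenizerFast` is first looked up).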
diff --git a/venv/lib/python3.10/site-packages/transformers/models/deprecated/retribert/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/deprecated/retribert/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..881ffefcbab182db76917e8d5144dbd64cdd5625
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/deprecated/retribert/__pycache__/__init__.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/deprecated/retribert/__pycache__/configuration_retribert.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/deprecated/retribert/__pycache__/configuration_retribert.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b7d6cf7ae32358b25b3bb754fcb0bbebc29de78d
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/deprecated/retribert/__pycache__/configuration_retribert.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/deprecated/retribert/__pycache__/modeling_retribert.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/deprecated/retribert/__pycache__/modeling_retribert.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..da0a29b3567052ea031dcc019f62fd0b824947bb
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/deprecated/retribert/__pycache__/modeling_retribert.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/deprecated/retribert/__pycache__/tokenization_retribert.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/deprecated/retribert/__pycache__/tokenization_retribert.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..80a8522ad34fc1fcafea4964d5a1abd1e86a5ca5
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/deprecated/retribert/__pycache__/tokenization_retribert.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/deprecated/retribert/__pycache__/tokenization_retribert_fast.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/deprecated/retribert/__pycache__/tokenization_retribert_fast.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2120fb66c9a32c3db3f17c2b2c59268917276b20
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/deprecated/retribert/__pycache__/tokenization_retribert_fast.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/deprecated/retribert/configuration_retribert.py b/venv/lib/python3.10/site-packages/transformers/models/deprecated/retribert/configuration_retribert.py
new file mode 100644
index 0000000000000000000000000000000000000000..c188c7347a8fb85c14748095b762e0a2583cee00
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/deprecated/retribert/configuration_retribert.py
@@ -0,0 +1,107 @@
+# coding=utf-8
+# Copyright 2019-present, the HuggingFace Inc. team, The Google AI Language Team and Facebook, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" RetriBERT model configuration"""
+
+from ....configuration_utils import PretrainedConfig
+from ....utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+from .._archive_maps import RETRIBERT_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
+
+
+class RetriBertConfig(PretrainedConfig):
+ r"""
+ This is the configuration class to store the configuration of a [`RetriBertModel`]. It is used to instantiate a
+    RetriBERT model according to the specified arguments, defining the model architecture. Instantiating a
+ configuration with the defaults will yield a similar configuration to that of the RetriBERT
+ [yjernite/retribert-base-uncased](https://huggingface.co/yjernite/retribert-base-uncased) architecture.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+
+ Args:
+ vocab_size (`int`, *optional*, defaults to 30522):
+ Vocabulary size of the RetriBERT model. Defines the number of different tokens that can be represented by
+ the `inputs_ids` passed when calling [`RetriBertModel`]
+ hidden_size (`int`, *optional*, defaults to 768):
+ Dimensionality of the encoder layers and the pooler layer.
+        num_hidden_layers (`int`, *optional*, defaults to 8):
+ Number of hidden layers in the Transformer encoder.
+ num_attention_heads (`int`, *optional*, defaults to 12):
+ Number of attention heads for each attention layer in the Transformer encoder.
+ intermediate_size (`int`, *optional*, defaults to 3072):
+ Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
+ hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+ `"relu"`, `"silu"` and `"gelu_new"` are supported.
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
+ The dropout ratio for the attention probabilities.
+ max_position_embeddings (`int`, *optional*, defaults to 512):
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
+ just in case (e.g., 512 or 1024 or 2048).
+ type_vocab_size (`int`, *optional*, defaults to 2):
+ The vocabulary size of the *token_type_ids* passed into [`BertModel`].
+ initializer_range (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+ layer_norm_eps (`float`, *optional*, defaults to 1e-12):
+ The epsilon used by the layer normalization layers.
+ share_encoders (`bool`, *optional*, defaults to `True`):
+            Whether or not to use the same Bert-type encoder for the queries and the documents.
+ projection_dim (`int`, *optional*, defaults to 128):
+            Final dimension of the query and document representations after projection.
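+
+    Example (minimal sketch, not from the upstream docstring; it only assumes the defaults above):
+
+    ```python
+    >>> from transformers import RetriBertConfig, RetriBertModel
+
+    >>> # Initializing a configuration with default values
+    >>> configuration = RetriBertConfig()
+
+    >>> # Initializing a model (with random weights) from that configuration
+    >>> model = RetriBertModel(configuration)
+
+    >>> # Accessing the model configuration
+    >>> configuration = model.config
+    ```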
+ """
+
+ model_type = "retribert"
+
+ def __init__(
+ self,
+ vocab_size=30522,
+ hidden_size=768,
+ num_hidden_layers=8,
+ num_attention_heads=12,
+ intermediate_size=3072,
+ hidden_act="gelu",
+ hidden_dropout_prob=0.1,
+ attention_probs_dropout_prob=0.1,
+ max_position_embeddings=512,
+ type_vocab_size=2,
+ initializer_range=0.02,
+ layer_norm_eps=1e-12,
+ share_encoders=True,
+ projection_dim=128,
+ pad_token_id=0,
+ **kwargs,
+ ):
+ super().__init__(pad_token_id=pad_token_id, **kwargs)
+
+ self.vocab_size = vocab_size
+ self.hidden_size = hidden_size
+ self.num_hidden_layers = num_hidden_layers
+ self.num_attention_heads = num_attention_heads
+ self.hidden_act = hidden_act
+ self.intermediate_size = intermediate_size
+ self.hidden_dropout_prob = hidden_dropout_prob
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
+ self.max_position_embeddings = max_position_embeddings
+ self.type_vocab_size = type_vocab_size
+ self.initializer_range = initializer_range
+ self.layer_norm_eps = layer_norm_eps
+ self.share_encoders = share_encoders
+ self.projection_dim = projection_dim
diff --git a/venv/lib/python3.10/site-packages/transformers/models/deprecated/retribert/modeling_retribert.py b/venv/lib/python3.10/site-packages/transformers/models/deprecated/retribert/modeling_retribert.py
new file mode 100644
index 0000000000000000000000000000000000000000..7dba8a276eeb56c379f782ea2bd5e1be7119cfe7
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/deprecated/retribert/modeling_retribert.py
@@ -0,0 +1,218 @@
+# coding=utf-8
+# Copyright 2019-present, the HuggingFace Inc. team, The Google AI Language Team and Facebook, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+RetriBERT model
+"""
+
+
+import math
+from typing import Optional
+
+import torch
+import torch.utils.checkpoint as checkpoint
+from torch import nn
+
+from ....modeling_utils import PreTrainedModel
+from ....utils import add_start_docstrings, logging
+from ...bert.modeling_bert import BertModel
+from .configuration_retribert import RetriBertConfig
+
+
+logger = logging.get_logger(__name__)
+
+
+from .._archive_maps import RETRIBERT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
+
+
+# INTERFACE FOR ENCODER AND TASK SPECIFIC MODEL #
+class RetriBertPreTrainedModel(PreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = RetriBertConfig
+ load_tf_weights = None
+ base_model_prefix = "retribert"
+
+ def _init_weights(self, module):
+ """Initialize the weights"""
+ if isinstance(module, nn.Linear):
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+ if module.bias is not None:
+ module.bias.data.zero_()
+ elif isinstance(module, nn.Embedding):
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+ if module.padding_idx is not None:
+ module.weight.data[module.padding_idx].zero_()
+ elif isinstance(module, nn.LayerNorm):
+ module.bias.data.zero_()
+ module.weight.data.fill_(1.0)
+
+
+RETRIBERT_START_DOCSTRING = r"""
+
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
+    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
+ etc.)
+
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
+ and behavior.
+
+ Parameters:
+ config ([`RetriBertConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+
+@add_start_docstrings(
+    """Bert-based model to embed queries or documents for document retrieval.""",
+ RETRIBERT_START_DOCSTRING,
+)
+class RetriBertModel(RetriBertPreTrainedModel):
+ def __init__(self, config: RetriBertConfig) -> None:
+ super().__init__(config)
+ self.projection_dim = config.projection_dim
+
+ self.bert_query = BertModel(config)
+ self.bert_doc = None if config.share_encoders else BertModel(config)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+ self.project_query = nn.Linear(config.hidden_size, config.projection_dim, bias=False)
+ self.project_doc = nn.Linear(config.hidden_size, config.projection_dim, bias=False)
+
+ self.ce_loss = nn.CrossEntropyLoss(reduction="mean")
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def embed_sentences_checkpointed(
+ self,
+ input_ids,
+ attention_mask,
+ sent_encoder,
+ checkpoint_batch_size=-1,
+ ):
+ # reproduces BERT forward pass with checkpointing
+ if checkpoint_batch_size < 0 or input_ids.shape[0] < checkpoint_batch_size:
+ return sent_encoder(input_ids, attention_mask=attention_mask)[1]
+ else:
+ # prepare implicit variables
+ device = input_ids.device
+ input_shape = input_ids.size()
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
+ head_mask = [None] * sent_encoder.config.num_hidden_layers
+ extended_attention_mask: torch.Tensor = sent_encoder.get_extended_attention_mask(
+ attention_mask, input_shape
+ )
+
+ # define function for checkpointing
+ def partial_encode(*inputs):
+ encoder_outputs = sent_encoder.encoder(
+ inputs[0],
+ attention_mask=inputs[1],
+ head_mask=head_mask,
+ )
+ sequence_output = encoder_outputs[0]
+ pooled_output = sent_encoder.pooler(sequence_output)
+ return pooled_output
+
+ # run embedding layer on everything at once
+ embedding_output = sent_encoder.embeddings(
+ input_ids=input_ids, position_ids=None, token_type_ids=token_type_ids, inputs_embeds=None
+ )
+ # run encoding and pooling on one mini-batch at a time
+ pooled_output_list = []
+ for b in range(math.ceil(input_ids.shape[0] / checkpoint_batch_size)):
+ b_embedding_output = embedding_output[b * checkpoint_batch_size : (b + 1) * checkpoint_batch_size]
+ b_attention_mask = extended_attention_mask[b * checkpoint_batch_size : (b + 1) * checkpoint_batch_size]
+ pooled_output = checkpoint.checkpoint(partial_encode, b_embedding_output, b_attention_mask)
+ pooled_output_list.append(pooled_output)
+ return torch.cat(pooled_output_list, dim=0)
+
+ def embed_questions(
+ self,
+ input_ids,
+ attention_mask=None,
+ checkpoint_batch_size=-1,
+ ):
+ q_reps = self.embed_sentences_checkpointed(
+ input_ids,
+ attention_mask,
+ self.bert_query,
+ checkpoint_batch_size,
+ )
+ return self.project_query(q_reps)
+
+ def embed_answers(
+ self,
+ input_ids,
+ attention_mask=None,
+ checkpoint_batch_size=-1,
+ ):
+ a_reps = self.embed_sentences_checkpointed(
+ input_ids,
+ attention_mask,
+ self.bert_query if self.bert_doc is None else self.bert_doc,
+ checkpoint_batch_size,
+ )
+ return self.project_doc(a_reps)
+
+ def forward(
+ self,
+ input_ids_query: torch.LongTensor,
+ attention_mask_query: Optional[torch.FloatTensor],
+ input_ids_doc: torch.LongTensor,
+ attention_mask_doc: Optional[torch.FloatTensor],
+ checkpoint_batch_size: int = -1,
+ ) -> torch.FloatTensor:
+ r"""
+ Args:
+ input_ids_query (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary for the queries in a batch.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask_query (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ input_ids_doc (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary for the documents in a batch.
+ attention_mask_doc (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
+                Mask to avoid performing attention on document padding token indices.
+ checkpoint_batch_size (`int`, *optional*, defaults to `-1`):
+ If greater than 0, uses gradient checkpointing to only compute sequence representation on
+ `checkpoint_batch_size` examples at a time on the GPU. All query representations are still compared to
+ all document representations in the batch.
+
+ Return:
+            `torch.FloatTensor`: The bidirectional cross-entropy loss obtained while trying to match each query to its
+            corresponding document and each document to its corresponding query in the batch.
+ """
+ device = input_ids_query.device
+ q_reps = self.embed_questions(input_ids_query, attention_mask_query, checkpoint_batch_size)
+ a_reps = self.embed_answers(input_ids_doc, attention_mask_doc, checkpoint_batch_size)
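+        # in-batch contrastive setup: compare_scores[i, j] scores query i against document j, so the
+        # matching pairs sit on the diagonal and torch.arange(...) provides the target labels below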
+ compare_scores = torch.mm(q_reps, a_reps.t())
+ loss_qa = self.ce_loss(compare_scores, torch.arange(compare_scores.shape[1]).to(device))
+ loss_aq = self.ce_loss(compare_scores.t(), torch.arange(compare_scores.shape[0]).to(device))
+ loss = (loss_qa + loss_aq) / 2
+ return loss
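+
+
+# Illustrative usage sketch (not part of the upstream module; the checkpoint name is the reference
+# checkpoint cited in the docstrings above, and the example strings are arbitrary):
+#
+#   from transformers import RetriBertModel, RetriBertTokenizer
+#
+#   tokenizer = RetriBertTokenizer.from_pretrained("yjernite/retribert-base-uncased")
+#   model = RetriBertModel.from_pretrained("yjernite/retribert-base-uncased")
+#   q = tokenizer(["what is a checkpoint?"], return_tensors="pt", padding=True)
+#   d = tokenizer(["A checkpoint stores the weights of a model."], return_tensors="pt", padding=True)
+#   loss = model(q["input_ids"], q["attention_mask"], d["input_ids"], d["attention_mask"])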
diff --git a/venv/lib/python3.10/site-packages/transformers/models/deprecated/retribert/tokenization_retribert.py b/venv/lib/python3.10/site-packages/transformers/models/deprecated/retribert/tokenization_retribert.py
new file mode 100644
index 0000000000000000000000000000000000000000..c991f3972230bd9027f43137b2a51550d4655f4d
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/deprecated/retribert/tokenization_retribert.py
@@ -0,0 +1,517 @@
+# coding=utf-8
+# Copyright 2018 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Tokenization classes for RetriBERT."""
+
+import collections
+import os
+import unicodedata
+from typing import List, Optional, Tuple
+
+from ....tokenization_utils import PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace
+from ....utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}
+
+
+# Copied from transformers.models.bert.tokenization_bert.load_vocab
+def load_vocab(vocab_file):
+ """Loads a vocabulary file into a dictionary."""
+ vocab = collections.OrderedDict()
+ with open(vocab_file, "r", encoding="utf-8") as reader:
+ tokens = reader.readlines()
+ for index, token in enumerate(tokens):
+ token = token.rstrip("\n")
+ vocab[token] = index
+ return vocab
+
+
+# Copied from transformers.models.bert.tokenization_bert.whitespace_tokenize
+def whitespace_tokenize(text):
+ """Runs basic whitespace cleaning and splitting on a piece of text."""
+ text = text.strip()
+ if not text:
+ return []
+ tokens = text.split()
+ return tokens
+
+
+class RetriBertTokenizer(PreTrainedTokenizer):
+ r"""
+ Constructs a RetriBERT tokenizer.
+
+ [`RetriBertTokenizer`] is identical to [`BertTokenizer`] and runs end-to-end tokenization: punctuation splitting
+ and wordpiece.
+
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer
+    to this superclass for more information regarding those methods.
+
+ Args:
+ vocab_file (`str`):
+ File containing the vocabulary.
+ do_lower_case (`bool`, *optional*, defaults to `True`):
+ Whether or not to lowercase the input when tokenizing.
+ do_basic_tokenize (`bool`, *optional*, defaults to `True`):
+ Whether or not to do basic tokenization before WordPiece.
+ never_split (`Iterable`, *optional*):
+ Collection of tokens which will never be split during tokenization. Only has an effect when
+            `do_basic_tokenize=True`.
+ unk_token (`str`, *optional*, defaults to `"[UNK]"`):
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
+ token instead.
+ sep_token (`str`, *optional*, defaults to `"[SEP]"`):
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
+ sequence classification or for a text and a question for question answering. It is also used as the last
+ token of a sequence built with special tokens.
+ pad_token (`str`, *optional*, defaults to `"[PAD]"`):
+ The token used for padding, for example when batching sequences of different lengths.
+ cls_token (`str`, *optional*, defaults to `"[CLS]"`):
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
+ mask_token (`str`, *optional*, defaults to `"[MASK]"`):
+ The token used for masking values. This is the token used when training this model with masked language
+ modeling. This is the token which the model will try to predict.
+ tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
+ Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see this
+ [issue](https://github.com/huggingface/transformers/issues/328)).
+ strip_accents (`bool`, *optional*):
+ Whether or not to strip all accents. If this option is not specified, then it will be determined by the
+ value for `lowercase` (as in the original BERT).
+ """
+
+ vocab_files_names = VOCAB_FILES_NAMES
+ model_input_names = ["input_ids", "attention_mask"]
+
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.__init__
+ def __init__(
+ self,
+ vocab_file,
+ do_lower_case=True,
+ do_basic_tokenize=True,
+ never_split=None,
+ unk_token="[UNK]",
+ sep_token="[SEP]",
+ pad_token="[PAD]",
+ cls_token="[CLS]",
+ mask_token="[MASK]",
+ tokenize_chinese_chars=True,
+ strip_accents=None,
+ **kwargs,
+ ):
+ if not os.path.isfile(vocab_file):
+ raise ValueError(
+ f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
+ " model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
+ )
+ self.vocab = load_vocab(vocab_file)
+ self.ids_to_tokens = collections.OrderedDict([(ids, tok) for tok, ids in self.vocab.items()])
+ self.do_basic_tokenize = do_basic_tokenize
+ if do_basic_tokenize:
+ self.basic_tokenizer = BasicTokenizer(
+ do_lower_case=do_lower_case,
+ never_split=never_split,
+ tokenize_chinese_chars=tokenize_chinese_chars,
+ strip_accents=strip_accents,
+ )
+
+ self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=str(unk_token))
+
+ super().__init__(
+ do_lower_case=do_lower_case,
+ do_basic_tokenize=do_basic_tokenize,
+ never_split=never_split,
+ unk_token=unk_token,
+ sep_token=sep_token,
+ pad_token=pad_token,
+ cls_token=cls_token,
+ mask_token=mask_token,
+ tokenize_chinese_chars=tokenize_chinese_chars,
+ strip_accents=strip_accents,
+ **kwargs,
+ )
+
+ @property
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.do_lower_case
+ def do_lower_case(self):
+ return self.basic_tokenizer.do_lower_case
+
+ @property
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.vocab_size
+ def vocab_size(self):
+ return len(self.vocab)
+
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.get_vocab
+ def get_vocab(self):
+ return dict(self.vocab, **self.added_tokens_encoder)
+
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer._tokenize
+ def _tokenize(self, text, split_special_tokens=False):
+ split_tokens = []
+ if self.do_basic_tokenize:
+ for token in self.basic_tokenizer.tokenize(
+ text, never_split=self.all_special_tokens if not split_special_tokens else None
+ ):
+ # If the token is part of the never_split set
+ if token in self.basic_tokenizer.never_split:
+ split_tokens.append(token)
+ else:
+ split_tokens += self.wordpiece_tokenizer.tokenize(token)
+ else:
+ split_tokens = self.wordpiece_tokenizer.tokenize(text)
+ return split_tokens
+
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer._convert_token_to_id
+ def _convert_token_to_id(self, token):
+ """Converts a token (str) in an id using the vocab."""
+ return self.vocab.get(token, self.vocab.get(self.unk_token))
+
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer._convert_id_to_token
+ def _convert_id_to_token(self, index):
+ """Converts an index (integer) in a token (str) using the vocab."""
+ return self.ids_to_tokens.get(index, self.unk_token)
+
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.convert_tokens_to_string
+ def convert_tokens_to_string(self, tokens):
+ """Converts a sequence of tokens (string) in a single string."""
+ out_string = " ".join(tokens).replace(" ##", "").strip()
+ return out_string
+
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.build_inputs_with_special_tokens
+ def build_inputs_with_special_tokens(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+        Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
+ adding special tokens. A BERT sequence has the following format:
+
+ - single sequence: `[CLS] X [SEP]`
+ - pair of sequences: `[CLS] A [SEP] B [SEP]`
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs to which the special tokens will be added.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
+ """
+ if token_ids_1 is None:
+ return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
+ cls = [self.cls_token_id]
+ sep = [self.sep_token_id]
+ return cls + token_ids_0 + sep + token_ids_1 + sep
+
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.get_special_tokens_mask
+ def get_special_tokens_mask(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
+ ) -> List[int]:
+ """
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
+ special tokens using the tokenizer `prepare_for_model` method.
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
+ Whether or not the token list is already formatted with special tokens for the model.
+
+ Returns:
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
+ """
+
+ if already_has_special_tokens:
+ return super().get_special_tokens_mask(
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
+ )
+
+ if token_ids_1 is not None:
+ return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
+ return [1] + ([0] * len(token_ids_0)) + [1]
+
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.create_token_type_ids_from_sequences
+ def create_token_type_ids_from_sequences(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. A BERT sequence
+ pair mask has the following format:
+
+ ```
+ 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
+ | first sequence | second sequence |
+ ```
+
+ If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
+ """
+ sep = [self.sep_token_id]
+ cls = [self.cls_token_id]
+ if token_ids_1 is None:
+ return len(cls + token_ids_0 + sep) * [0]
+ return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
+
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.save_vocabulary
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
+ index = 0
+ if os.path.isdir(save_directory):
+ vocab_file = os.path.join(
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
+ )
+ else:
+ vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
+ with open(vocab_file, "w", encoding="utf-8") as writer:
+ for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
+ if index != token_index:
+ logger.warning(
+ f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
+ " Please check that the vocabulary is not corrupted!"
+ )
+ index = token_index
+ writer.write(token + "\n")
+ index += 1
+ return (vocab_file,)
+
+
+# Copied from transformers.models.bert.tokenization_bert.BasicTokenizer
+class BasicTokenizer(object):
+ """
+ Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.).
+
+ Args:
+ do_lower_case (`bool`, *optional*, defaults to `True`):
+ Whether or not to lowercase the input when tokenizing.
+ never_split (`Iterable`, *optional*):
+ Collection of tokens which will never be split during tokenization. Only has an effect when
+            `do_basic_tokenize=True`.
+ tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
+ Whether or not to tokenize Chinese characters.
+
+ This should likely be deactivated for Japanese (see this
+ [issue](https://github.com/huggingface/transformers/issues/328)).
+ strip_accents (`bool`, *optional*):
+ Whether or not to strip all accents. If this option is not specified, then it will be determined by the
+ value for `lowercase` (as in the original BERT).
+ do_split_on_punc (`bool`, *optional*, defaults to `True`):
+ In some instances we want to skip the basic punctuation splitting so that later tokenization can capture
+ the full context of the words, such as contractions.
+ """
+
+ def __init__(
+ self,
+ do_lower_case=True,
+ never_split=None,
+ tokenize_chinese_chars=True,
+ strip_accents=None,
+ do_split_on_punc=True,
+ ):
+ if never_split is None:
+ never_split = []
+ self.do_lower_case = do_lower_case
+ self.never_split = set(never_split)
+ self.tokenize_chinese_chars = tokenize_chinese_chars
+ self.strip_accents = strip_accents
+ self.do_split_on_punc = do_split_on_punc
+
+ def tokenize(self, text, never_split=None):
+ """
+        Basic Tokenization of a piece of text. For sub-word tokenization, see WordpieceTokenizer.
+
+ Args:
+            never_split (`List[str]`, *optional*):
+                Kept for backward compatibility purposes. Now implemented directly at the base class level (see
+                [`PreTrainedTokenizer.tokenize`]). List of tokens not to split.
+ """
+ # union() returns a new set by concatenating the two sets.
+ never_split = self.never_split.union(set(never_split)) if never_split else self.never_split
+ text = self._clean_text(text)
+
+ # This was added on November 1st, 2018 for the multilingual and Chinese
+ # models. This is also applied to the English models now, but it doesn't
+ # matter since the English models were not trained on any Chinese data
+ # and generally don't have any Chinese data in them (there are Chinese
+ # characters in the vocabulary because Wikipedia does have some Chinese
+        # words in the English Wikipedia).
+ if self.tokenize_chinese_chars:
+ text = self._tokenize_chinese_chars(text)
+ # prevents treating the same character with different unicode codepoints as different characters
+ unicode_normalized_text = unicodedata.normalize("NFC", text)
+ orig_tokens = whitespace_tokenize(unicode_normalized_text)
+ split_tokens = []
+ for token in orig_tokens:
+ if token not in never_split:
+ if self.do_lower_case:
+ token = token.lower()
+ if self.strip_accents is not False:
+ token = self._run_strip_accents(token)
+ elif self.strip_accents:
+ token = self._run_strip_accents(token)
+ split_tokens.extend(self._run_split_on_punc(token, never_split))
+
+ output_tokens = whitespace_tokenize(" ".join(split_tokens))
+ return output_tokens
+
+ def _run_strip_accents(self, text):
+ """Strips accents from a piece of text."""
+ text = unicodedata.normalize("NFD", text)
+ output = []
+ for char in text:
+ cat = unicodedata.category(char)
+ if cat == "Mn":
+ continue
+ output.append(char)
+ return "".join(output)
+
+ def _run_split_on_punc(self, text, never_split=None):
+ """Splits punctuation on a piece of text."""
+ if not self.do_split_on_punc or (never_split is not None and text in never_split):
+ return [text]
+ chars = list(text)
+ i = 0
+ start_new_word = True
+ output = []
+ while i < len(chars):
+ char = chars[i]
+ if _is_punctuation(char):
+ output.append([char])
+ start_new_word = True
+ else:
+ if start_new_word:
+ output.append([])
+ start_new_word = False
+ output[-1].append(char)
+ i += 1
+
+ return ["".join(x) for x in output]
+
+ def _tokenize_chinese_chars(self, text):
+ """Adds whitespace around any CJK character."""
+ output = []
+ for char in text:
+ cp = ord(char)
+ if self._is_chinese_char(cp):
+ output.append(" ")
+ output.append(char)
+ output.append(" ")
+ else:
+ output.append(char)
+ return "".join(output)
+
+ def _is_chinese_char(self, cp):
+ """Checks whether CP is the codepoint of a CJK character."""
+ # This defines a "chinese character" as anything in the CJK Unicode block:
+ # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
+ #
+ # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
+ # despite its name. The modern Korean Hangul alphabet is a different block,
+ # as is Japanese Hiragana and Katakana. Those alphabets are used to write
+ # space-separated words, so they are not treated specially and handled
+        # like all of the other languages.
+ if (
+ (cp >= 0x4E00 and cp <= 0x9FFF)
+ or (cp >= 0x3400 and cp <= 0x4DBF) #
+ or (cp >= 0x20000 and cp <= 0x2A6DF) #
+ or (cp >= 0x2A700 and cp <= 0x2B73F) #
+ or (cp >= 0x2B740 and cp <= 0x2B81F) #
+ or (cp >= 0x2B820 and cp <= 0x2CEAF) #
+ or (cp >= 0xF900 and cp <= 0xFAFF)
+ or (cp >= 0x2F800 and cp <= 0x2FA1F) #
+ ): #
+ return True
+
+ return False
+
+ def _clean_text(self, text):
+ """Performs invalid character removal and whitespace cleanup on text."""
+ output = []
+ for char in text:
+ cp = ord(char)
+ if cp == 0 or cp == 0xFFFD or _is_control(char):
+ continue
+ if _is_whitespace(char):
+ output.append(" ")
+ else:
+ output.append(char)
+ return "".join(output)
+
+
+# Copied from transformers.models.bert.tokenization_bert.WordpieceTokenizer
+class WordpieceTokenizer(object):
+ """Runs WordPiece tokenization."""
+
+ def __init__(self, vocab, unk_token, max_input_chars_per_word=100):
+ self.vocab = vocab
+ self.unk_token = unk_token
+ self.max_input_chars_per_word = max_input_chars_per_word
+
+ def tokenize(self, text):
+ """
+ Tokenizes a piece of text into its word pieces. This uses a greedy longest-match-first algorithm to perform
+ tokenization using the given vocabulary.
+
+        For example, `input = "unaffable"` will return as output `["un", "##aff", "##able"]`.
+
+ Args:
+ text: A single token or whitespace separated tokens. This should have
+ already been passed through *BasicTokenizer*.
+
+ Returns:
+ A list of wordpiece tokens.
+ """
+
+ output_tokens = []
+ for token in whitespace_tokenize(text):
+ chars = list(token)
+ if len(chars) > self.max_input_chars_per_word:
+ output_tokens.append(self.unk_token)
+ continue
+
+ is_bad = False
+ start = 0
+ sub_tokens = []
+ while start < len(chars):
+ end = len(chars)
+ cur_substr = None
+ while start < end:
+ substr = "".join(chars[start:end])
+ if start > 0:
+ substr = "##" + substr
+ if substr in self.vocab:
+ cur_substr = substr
+ break
+ end -= 1
+ if cur_substr is None:
+ is_bad = True
+ break
+ sub_tokens.append(cur_substr)
+ start = end
+
+ if is_bad:
+ output_tokens.append(self.unk_token)
+ else:
+ output_tokens.extend(sub_tokens)
+ return output_tokens
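+
+
+# Illustrative usage sketch (not part of the upstream module; "vocab.txt" is a hypothetical local
+# WordPiece vocabulary file):
+#
+#   tokenizer = RetriBertTokenizer(vocab_file="vocab.txt")
+#   tokenizer.tokenize("unaffable")        # e.g. ["un", "##aff", "##able"] if those pieces are in the vocab
+#   tokenizer("a query", "a document")     # adds [CLS]/[SEP] and token type ids as documented above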
diff --git a/venv/lib/python3.10/site-packages/transformers/models/deprecated/retribert/tokenization_retribert_fast.py b/venv/lib/python3.10/site-packages/transformers/models/deprecated/retribert/tokenization_retribert_fast.py
new file mode 100644
index 0000000000000000000000000000000000000000..97fbfc07d30ca65cd4d31dc2e221d0ef22073175
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/deprecated/retribert/tokenization_retribert_fast.py
@@ -0,0 +1,180 @@
+# coding=utf-8
+# Copyright 2018 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Tokenization classes for RetriBERT."""
+
+import json
+from typing import List, Optional, Tuple
+
+from tokenizers import normalizers
+
+from ....tokenization_utils_fast import PreTrainedTokenizerFast
+from ....utils import logging
+from .tokenization_retribert import RetriBertTokenizer
+
+
+logger = logging.get_logger(__name__)
+
+VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
+
+
+class RetriBertTokenizerFast(PreTrainedTokenizerFast):
+ r"""
+ Construct a "fast" RetriBERT tokenizer (backed by HuggingFace's *tokenizers* library).
+
+ [`RetriBertTokenizerFast`] is identical to [`BertTokenizerFast`] and runs end-to-end tokenization: punctuation
+ splitting and wordpiece.
+
+ This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
+ refer to this superclass for more information regarding those methods.
+
+ Args:
+ vocab_file (`str`):
+ File containing the vocabulary.
+ do_lower_case (`bool`, *optional*, defaults to `True`):
+ Whether or not to lowercase the input when tokenizing.
+ unk_token (`str`, *optional*, defaults to `"[UNK]"`):
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
+ token instead.
+ sep_token (`str`, *optional*, defaults to `"[SEP]"`):
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
+ sequence classification or for a text and a question for question answering. It is also used as the last
+ token of a sequence built with special tokens.
+ pad_token (`str`, *optional*, defaults to `"[PAD]"`):
+ The token used for padding, for example when batching sequences of different lengths.
+ cls_token (`str`, *optional*, defaults to `"[CLS]"`):
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
+ mask_token (`str`, *optional*, defaults to `"[MASK]"`):
+ The token used for masking values. This is the token used when training this model with masked language
+ modeling. This is the token which the model will try to predict.
+ clean_text (`bool`, *optional*, defaults to `True`):
+            Whether or not to clean the text before tokenization by removing any control characters and replacing all
+            whitespace characters with a classic space.
+ tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
+ Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see [this
+ issue](https://github.com/huggingface/transformers/issues/328)).
+ strip_accents (`bool`, *optional*):
+ Whether or not to strip all accents. If this option is not specified, then it will be determined by the
+ value for `lowercase` (as in the original BERT).
+ wordpieces_prefix (`str`, *optional*, defaults to `"##"`):
+ The prefix for subwords.
+ """
+
+ vocab_files_names = VOCAB_FILES_NAMES
+ slow_tokenizer_class = RetriBertTokenizer
+ model_input_names = ["input_ids", "attention_mask"]
+
+ # Copied from transformers.models.bert.tokenization_bert_fast.BertTokenizerFast.__init__
+ def __init__(
+ self,
+ vocab_file=None,
+ tokenizer_file=None,
+ do_lower_case=True,
+ unk_token="[UNK]",
+ sep_token="[SEP]",
+ pad_token="[PAD]",
+ cls_token="[CLS]",
+ mask_token="[MASK]",
+ tokenize_chinese_chars=True,
+ strip_accents=None,
+ **kwargs,
+ ):
+ super().__init__(
+ vocab_file,
+ tokenizer_file=tokenizer_file,
+ do_lower_case=do_lower_case,
+ unk_token=unk_token,
+ sep_token=sep_token,
+ pad_token=pad_token,
+ cls_token=cls_token,
+ mask_token=mask_token,
+ tokenize_chinese_chars=tokenize_chinese_chars,
+ strip_accents=strip_accents,
+ **kwargs,
+ )
+
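+        # keep the Rust backend normalizer consistent with the Python-level arguments: if the serialized
+        # tokenizer.json was saved with different lowercase / strip_accents / handle_chinese_chars settings,
+        # rebuild the normalizer with the values passed to this constructor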
+ normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
+ if (
+ normalizer_state.get("lowercase", do_lower_case) != do_lower_case
+ or normalizer_state.get("strip_accents", strip_accents) != strip_accents
+ or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
+ ):
+ normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
+ normalizer_state["lowercase"] = do_lower_case
+ normalizer_state["strip_accents"] = strip_accents
+ normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
+ self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
+
+ self.do_lower_case = do_lower_case
+
+ # Copied from transformers.models.bert.tokenization_bert_fast.BertTokenizerFast.build_inputs_with_special_tokens
+ def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
+ """
+        Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
+ adding special tokens. A BERT sequence has the following format:
+
+ - single sequence: `[CLS] X [SEP]`
+ - pair of sequences: `[CLS] A [SEP] B [SEP]`
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs to which the special tokens will be added.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
+ """
+ output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
+
+ if token_ids_1 is not None:
+ output += token_ids_1 + [self.sep_token_id]
+
+ return output
+
+ # Copied from transformers.models.bert.tokenization_bert_fast.BertTokenizerFast.create_token_type_ids_from_sequences
+ def create_token_type_ids_from_sequences(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. A BERT sequence
+ pair mask has the following format:
+
+ ```
+ 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
+ | first sequence | second sequence |
+ ```
+
+ If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
+ """
+ sep = [self.sep_token_id]
+ cls = [self.cls_token_id]
+ if token_ids_1 is None:
+ return len(cls + token_ids_0 + sep) * [0]
+ return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
+
+ # Copied from transformers.models.bert.tokenization_bert_fast.BertTokenizerFast.save_vocabulary
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
+ files = self._tokenizer.model.save(save_directory, name=filename_prefix)
+ return tuple(files)
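+
+
+# Illustrative usage sketch (not part of the upstream module; "tokenizer.json" is a hypothetical
+# serialized tokenizers file):
+#
+#   tokenizer = RetriBertTokenizerFast(tokenizer_file="tokenizer.json")
+#   enc = tokenizer("a query", "a document")
+#   enc["input_ids"]        # [CLS] query [SEP] document [SEP]
+#   enc["token_type_ids"]   # 0s for the first sequence, 1s for the second, as described above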
diff --git a/venv/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/__init__.py b/venv/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..f3674e19665ca74e1e6ee3ac92ca812e54580007
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/__init__.py
@@ -0,0 +1,97 @@
+# Copyright 2020 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import TYPE_CHECKING
+
+from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
+
+
+_import_structure = {
+ "configuration_transfo_xl": ["TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP", "TransfoXLConfig"],
+ "tokenization_transfo_xl": ["TransfoXLCorpus", "TransfoXLTokenizer"],
+}
+
+try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_transfo_xl"] = [
+ "TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "AdaptiveEmbedding",
+ "TransfoXLForSequenceClassification",
+ "TransfoXLLMHeadModel",
+ "TransfoXLModel",
+ "TransfoXLPreTrainedModel",
+ "load_tf_weights_in_transfo_xl",
+ ]
+
+try:
+ if not is_tf_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_tf_transfo_xl"] = [
+ "TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "TFAdaptiveEmbedding",
+ "TFTransfoXLForSequenceClassification",
+ "TFTransfoXLLMHeadModel",
+ "TFTransfoXLMainLayer",
+ "TFTransfoXLModel",
+ "TFTransfoXLPreTrainedModel",
+ ]
+
+
+if TYPE_CHECKING:
+ from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
+ from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_transfo_xl import (
+ TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
+ AdaptiveEmbedding,
+ TransfoXLForSequenceClassification,
+ TransfoXLLMHeadModel,
+ TransfoXLModel,
+ TransfoXLPreTrainedModel,
+ load_tf_weights_in_transfo_xl,
+ )
+
+ try:
+ if not is_tf_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_tf_transfo_xl import (
+ TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
+ TFAdaptiveEmbedding,
+ TFTransfoXLForSequenceClassification,
+ TFTransfoXLLMHeadModel,
+ TFTransfoXLMainLayer,
+ TFTransfoXLModel,
+ TFTransfoXLPreTrainedModel,
+ )
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
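The `__init__.py` above follows the library's lazy-import pattern: `_import_structure` maps submodules to their public symbols, the `try/except OptionalDependencyNotAvailable` blocks add the torch- and TF-only symbols only when those backends are installed, and at runtime the module object is swapped for a `_LazyModule` so nothing heavy is imported until an attribute is first accessed (the `TYPE_CHECKING` branch gives static type checkers the real imports). A toy sketch of the same idea follows; this is an editor's stand-in, not the actual `_LazyModule` implementation:

```python
import importlib
import sys
import types


class LazyModule(types.ModuleType):
    """Toy stand-in for transformers' _LazyModule: resolve submodules on first attribute access."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure
        # Map each exported symbol back to the submodule that defines it.
        self._symbol_to_module = {sym: mod for mod, syms in import_structure.items() for sym in syms}

    def __getattr__(self, attr):
        module_name = self._symbol_to_module.get(attr)
        if module_name is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        module = importlib.import_module(f".{module_name}", self.__name__)
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value


# Typical use at the bottom of a package __init__.py (as in the file above):
# sys.modules[__name__] = LazyModule(__name__, _import_structure)
```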
diff --git a/venv/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b09ff2eb7aae923c3e11091fe54b8b6a7d38752c
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/__pycache__/__init__.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/__pycache__/configuration_transfo_xl.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/__pycache__/configuration_transfo_xl.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..524db4516a5c8f801719d6e7f6a95a088e12e626
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/__pycache__/configuration_transfo_xl.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/__pycache__/convert_transfo_xl_original_tf_checkpoint_to_pytorch.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/__pycache__/convert_transfo_xl_original_tf_checkpoint_to_pytorch.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..09d5aee9e58f0b81a33e1ab52f455caeed75e903
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/__pycache__/convert_transfo_xl_original_tf_checkpoint_to_pytorch.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/__pycache__/modeling_tf_transfo_xl.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/__pycache__/modeling_tf_transfo_xl.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..03d7b120feab1b5fc1df08690bcbef0857974129
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/__pycache__/modeling_tf_transfo_xl.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/__pycache__/modeling_tf_transfo_xl_utilities.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/__pycache__/modeling_tf_transfo_xl_utilities.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..54caebda4883fec0f285d5f3f2c82c2ce78e0066
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/__pycache__/modeling_tf_transfo_xl_utilities.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/__pycache__/modeling_transfo_xl.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/__pycache__/modeling_transfo_xl.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0b55d4e8c87c80635bd2ab92bd8a4a553b6e3617
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/__pycache__/modeling_transfo_xl.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/__pycache__/modeling_transfo_xl_utilities.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/__pycache__/modeling_transfo_xl_utilities.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7fc1c94f4b440cf288d7d238ce81902b3776c71d
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/__pycache__/modeling_transfo_xl_utilities.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/__pycache__/tokenization_transfo_xl.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/__pycache__/tokenization_transfo_xl.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..19ac11cefa082e2fa139bb32caf03efb05196adb
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/__pycache__/tokenization_transfo_xl.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/configuration_transfo_xl.py b/venv/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/configuration_transfo_xl.py
new file mode 100644
index 0000000000000000000000000000000000000000..50bf94ae7ea3983d40dcf3d03c2e4e1027c23c80
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/configuration_transfo_xl.py
@@ -0,0 +1,189 @@
+# coding=utf-8
+# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
+# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" Transformer XL configuration"""
+
+from ....configuration_utils import PretrainedConfig
+from ....utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+
+from .._archive_maps import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
+
+
+class TransfoXLConfig(PretrainedConfig):
+ """
+ This is the configuration class to store the configuration of a [`TransfoXLModel`] or a [`TFTransfoXLModel`]. It is
+ used to instantiate a Transformer-XL model according to the specified arguments, defining the model architecture.
+ Instantiating a configuration with the defaults will yield a similar configuration to that of the TransfoXL
+ [transfo-xl/transfo-xl-wt103](https://huggingface.co/transfo-xl/transfo-xl-wt103) architecture.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+ Args:
+ vocab_size (`int`, *optional*, defaults to 267735):
+ Vocabulary size of the Transformer-XL model. Defines the number of different tokens that can be represented by the
+ `inputs_ids` passed when calling [`TransfoXLModel`] or [`TFTransfoXLModel`].
+ cutoffs (`List[int]`, *optional*, defaults to `[20000, 40000, 200000]`):
+ Cutoffs for the adaptive softmax.
+ d_model (`int`, *optional*, defaults to 1024):
+ Dimensionality of the model's hidden states.
+ d_embed (`int`, *optional*, defaults to 1024):
+ Dimensionality of the embeddings.
+ n_head (`int`, *optional*, defaults to 16):
+ Number of attention heads for each attention layer in the Transformer encoder.
+ d_head (`int`, *optional*, defaults to 64):
+ Dimensionality of the model's heads.
+ d_inner (`int`, *optional*, defaults to 4096):
+ Inner dimension of the feed-forward (FF) layers.
+ div_val (`int`, *optional*, defaults to 4):
+ Divisor value for the adaptive input and softmax.
+ pre_lnorm (`boolean`, *optional*, defaults to `False`):
+ Whether or not to apply LayerNorm to the input instead of the output in the blocks.
+ n_layer (`int`, *optional*, defaults to 18):
+ Number of hidden layers in the Transformer encoder.
+ mem_len (`int`, *optional*, defaults to 1600):
+ Length of the retained previous hidden states (the memory).
+ clamp_len (`int`, *optional*, defaults to 1000):
+ Use the same positional embeddings after `clamp_len`.
+ same_length (`boolean`, *optional*, defaults to `True`):
+ Whether or not to use the same attention length for all tokens.
+ proj_share_all_but_first (`boolean`, *optional*, defaults to `True`):
+ Whether to share all but the first projection layers (`True` to share, `False` to keep them separate).
+ attn_type (`int`, *optional*, defaults to 0):
+ Attention type. 0 for Transformer-XL, 1 for Shaw et al, 2 for Vaswani et al, 3 for Al Rfou et al.
+ sample_softmax (`int`, *optional*, defaults to -1):
+ Number of samples in the sampled softmax.
+ adaptive (`boolean`, *optional*, defaults to `True`):
+ Whether or not to use adaptive softmax.
+ dropout (`float`, *optional*, defaults to 0.1):
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+ dropatt (`float`, *optional*, defaults to 0.0):
+ The dropout ratio for the attention probabilities.
+ untie_r (`boolean`, *optional*, defaults to `True`):
+ Whether or not to untie relative position biases.
+ init (`str`, *optional*, defaults to `"normal"`):
+ Parameter initializer to use.
+ init_range (`float`, *optional*, defaults to 0.01):
+ Parameters initialized by U(-init_range, init_range).
+ proj_init_std (`float`, *optional*, defaults to 0.01):
+ Parameters initialized by N(0, proj_init_std).
+ init_std (`float`, *optional*, defaults to 0.02):
+ Parameters initialized by N(0, init_std).
+ layer_norm_epsilon (`float`, *optional*, defaults to 1e-05):
+ The epsilon to use in the layer normalization layers.
+ eos_token_id (`int`, *optional*, defaults to 0):
+ End of stream token id.
+
+ Examples:
+
+ ```python
+ >>> from transformers import TransfoXLConfig, TransfoXLModel
+
+ >>> # Initializing a Transformer XL configuration
+ >>> configuration = TransfoXLConfig()
+
+ >>> # Initializing a model (with random weights) from the configuration
+ >>> model = TransfoXLModel(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```"""
+
+ model_type = "transfo-xl"
+ keys_to_ignore_at_inference = ["mems"]
+ attribute_map = {
+ "n_token": "vocab_size",
+ "hidden_size": "d_model",
+ "num_attention_heads": "n_head",
+ "num_hidden_layers": "n_layer",
+ }
+
+ def __init__(
+ self,
+ vocab_size=267735,
+ cutoffs=[20000, 40000, 200000],
+ d_model=1024,
+ d_embed=1024,
+ n_head=16,
+ d_head=64,
+ d_inner=4096,
+ div_val=4,
+ pre_lnorm=False,
+ n_layer=18,
+ mem_len=1600,
+ clamp_len=1000,
+ same_length=True,
+ proj_share_all_but_first=True,
+ attn_type=0,
+ sample_softmax=-1,
+ adaptive=True,
+ dropout=0.1,
+ dropatt=0.0,
+ untie_r=True,
+ init="normal",
+ init_range=0.01,
+ proj_init_std=0.01,
+ init_std=0.02,
+ layer_norm_epsilon=1e-5,
+ eos_token_id=0,
+ **kwargs,
+ ):
+ self.vocab_size = vocab_size
+ self.cutoffs = []
+ self.cutoffs.extend(cutoffs)
+ if proj_share_all_but_first:
+ self.tie_projs = [False] + [True] * len(self.cutoffs)
+ else:
+ self.tie_projs = [False] + [False] * len(self.cutoffs)
+ self.d_model = d_model
+ self.d_embed = d_embed
+ self.d_head = d_head
+ self.d_inner = d_inner
+ self.div_val = div_val
+ self.pre_lnorm = pre_lnorm
+ self.n_layer = n_layer
+ self.n_head = n_head
+ self.mem_len = mem_len
+ self.same_length = same_length
+ self.attn_type = attn_type
+ self.clamp_len = clamp_len
+ self.sample_softmax = sample_softmax
+ self.adaptive = adaptive
+ self.dropout = dropout
+ self.dropatt = dropatt
+ self.untie_r = untie_r
+ self.init = init
+ self.init_range = init_range
+ "The bare Transformer-XL Model outputting raw hidden-states without any specific head on top.",
+ self.init_std = init_std
+ self.layer_norm_epsilon = layer_norm_epsilon
+ super().__init__(eos_token_id=eos_token_id, **kwargs)
+
+ @property
+ def max_position_embeddings(self):
+ # Message copied from Transformer-XL documentation
+ logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
+ return -1
+
+ @max_position_embeddings.setter
+ def max_position_embeddings(self, value):
+ # Message copied from Transformer-XL documentation
+ raise NotImplementedError(
+ f"The model {self.model_type} is one of the few models that has no sequence length limit."
+ )
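Two details of the configuration above are easy to miss: `attribute_map` lets framework-generic code read `hidden_size`/`num_attention_heads`/`num_hidden_layers` even though the config stores `d_model`/`n_head`/`n_layer`, and `proj_share_all_but_first` is expanded in `__init__` into a `tie_projs` list with one flag for the head plus one per cutoff. A short sketch, assuming the installed `transformers` version still exports these deprecated classes:

```python
from transformers import TransfoXLConfig

config = TransfoXLConfig(n_layer=6, d_model=512, mem_len=256)

# attribute_map aliases: generic names resolve to the Transformer-XL-specific fields.
assert config.hidden_size == config.d_model == 512
assert config.num_hidden_layers == config.n_layer == 6

# proj_share_all_but_first=True (the default) ties every projection except the first:
# one flag for the head, then one per cutoff.
print(config.cutoffs)    # [20000, 40000, 200000]
print(config.tie_projs)  # [False, True, True, True]
```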
diff --git a/venv/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/convert_transfo_xl_original_tf_checkpoint_to_pytorch.py b/venv/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/convert_transfo_xl_original_tf_checkpoint_to_pytorch.py
new file mode 100644
index 0000000000000000000000000000000000000000..d2693ac333b84b08769eb15a13a26dcf1a547267
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/convert_transfo_xl_original_tf_checkpoint_to_pytorch.py
@@ -0,0 +1,121 @@
+# coding=utf-8
+# Copyright 2018 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Convert Transformer XL checkpoint and datasets."""
+
+
+import argparse
+import os
+import pickle
+import sys
+
+import torch
+
+from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
+from transformers.models.deprecated.transfo_xl import tokenization_transfo_xl as data_utils
+from transformers.models.deprecated.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
+from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
+
+
+logging.set_verbosity_info()
+
+ # We do this to be able to load Python 2 dataset pickles
+# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
+data_utils.Vocab = data_utils.TransfoXLTokenizer
+data_utils.Corpus = data_utils.TransfoXLCorpus
+sys.modules["data_utils"] = data_utils
+sys.modules["vocabulary"] = data_utils
+
+
+def convert_transfo_xl_checkpoint_to_pytorch(
+ tf_checkpoint_path, transfo_xl_config_file, pytorch_dump_folder_path, transfo_xl_dataset_file
+):
+ if transfo_xl_dataset_file:
+ # Convert a pre-processed corpus (see original TensorFlow repo)
+ with open(transfo_xl_dataset_file, "rb") as fp:
+ corpus = pickle.load(fp, encoding="latin1")
+ # Save the vocabulary and dataset cache as dictionaries (more robust than pickles for long-term storage)
+ pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["pretrained_vocab_file"]
+ print(f"Save vocabulary to {pytorch_vocab_dump_path}")
+ corpus_vocab_dict = corpus.vocab.__dict__
+ torch.save(corpus_vocab_dict, pytorch_vocab_dump_path)
+
+ corpus_dict_no_vocab = corpus.__dict__
+ corpus_dict_no_vocab.pop("vocab", None)
+ pytorch_dataset_dump_path = pytorch_dump_folder_path + "/" + CORPUS_NAME
+ print(f"Save dataset to {pytorch_dataset_dump_path}")
+ torch.save(corpus_dict_no_vocab, pytorch_dataset_dump_path)
+
+ if tf_checkpoint_path:
+ # Convert a pre-trained TensorFlow model
+ config_path = os.path.abspath(transfo_xl_config_file)
+ tf_path = os.path.abspath(tf_checkpoint_path)
+
+ print(f"Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.")
+ # Initialise PyTorch model
+ if transfo_xl_config_file == "":
+ config = TransfoXLConfig()
+ else:
+ config = TransfoXLConfig.from_json_file(transfo_xl_config_file)
+ print(f"Building PyTorch model from configuration: {config}")
+ model = TransfoXLLMHeadModel(config)
+
+ model = load_tf_weights_in_transfo_xl(model, config, tf_path)
+ # Save pytorch-model
+ pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
+ pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
+ print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
+ torch.save(model.state_dict(), pytorch_weights_dump_path)
+ print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
+ with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
+ f.write(config.to_json_string())
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ parser.add_argument(
+ "--pytorch_dump_folder_path",
+ default=None,
+ type=str,
+ required=True,
+ help="Path to the folder to store the PyTorch model or dataset/vocab.",
+ )
+ parser.add_argument(
+ "--tf_checkpoint_path",
+ default="",
+ type=str,
+ help="An optional path to a TensorFlow checkpoint path to be converted.",
+ )
+ parser.add_argument(
+ "--transfo_xl_config_file",
+ default="",
+ type=str,
+ help=(
+ "An optional config json file corresponding to the pre-trained Transformer-XL model. \n"
+ "This specifies the model architecture."
+ ),
+ )
+ parser.add_argument(
+ "--transfo_xl_dataset_file",
+ default="",
+ type=str,
+ help="An optional dataset file to be converted into a vocabulary.",
+ )
+ args = parser.parse_args()
+ convert_transfo_xl_checkpoint_to_pytorch(
+ args.tf_checkpoint_path,
+ args.transfo_xl_config_file,
+ args.pytorch_dump_folder_path,
+ args.transfo_xl_dataset_file,
+ )
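The converter above can also be driven programmatically rather than through `argparse`; an empty string skips the corresponding step, exactly as in the CLI defaults. A hedged sketch with placeholder paths (they do not refer to real files):

```python
# Placeholder paths for illustration only; empty strings skip a step, as in the CLI defaults.
from transformers.models.deprecated.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
    convert_transfo_xl_checkpoint_to_pytorch,
)

convert_transfo_xl_checkpoint_to_pytorch(
    tf_checkpoint_path="/tmp/transfo_xl_tf/model.ckpt",  # TF checkpoint to convert ("" to skip)
    transfo_xl_config_file="",                           # "" -> start from a default TransfoXLConfig()
    pytorch_dump_folder_path="/tmp/transfo_xl_pt",       # WEIGHTS_NAME / CONFIG_NAME are written here
    transfo_xl_dataset_file="",                          # optional pickled corpus ("" to skip)
)
```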
diff --git a/venv/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/modeling_tf_transfo_xl.py b/venv/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/modeling_tf_transfo_xl.py
new file mode 100644
index 0000000000000000000000000000000000000000..27200a5d63f18b1d6457f1e303022f43b0d75d50
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/modeling_tf_transfo_xl.py
@@ -0,0 +1,1122 @@
+# coding=utf-8
+# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
+# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+ TF 2.0 Transformer XL model.
+"""
+
+from __future__ import annotations
+
+from dataclasses import dataclass
+from typing import List, Optional, Tuple, Union
+
+import numpy as np
+import tensorflow as tf
+
+from ....modeling_tf_utils import (
+ TFModelInputType,
+ TFPreTrainedModel,
+ TFSequenceClassificationLoss,
+ get_initializer,
+ keras,
+ keras_serializable,
+ unpack_inputs,
+)
+from ....tf_utils import shape_list, stable_softmax
+from ....utils import (
+ ModelOutput,
+ add_code_sample_docstrings,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ logging,
+)
+from .configuration_transfo_xl import TransfoXLConfig
+from .modeling_tf_transfo_xl_utilities import TFAdaptiveSoftmaxMask
+
+
+logger = logging.get_logger(__name__)
+
+_CHECKPOINT_FOR_DOC = "transfo-xl/transfo-xl-wt103"
+_CONFIG_FOR_DOC = "TransfoXLConfig"
+
+
+from .._archive_maps import TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
+
+
+class TFPositionalEmbedding(keras.layers.Layer):
+ def __init__(self, demb, **kwargs):
+ super().__init__(**kwargs)
+
+ self.inv_freq = 1 / (10000 ** (tf.range(0, demb, 2.0) / demb))
+
+ def call(self, pos_seq, bsz=None):
+ self.inv_freq = tf.cast(self.inv_freq, dtype=pos_seq.dtype)
+ sinusoid_inp = tf.einsum("i,j->ij", pos_seq, self.inv_freq)
+ pos_emb = tf.concat([tf.sin(sinusoid_inp), tf.cos(sinusoid_inp)], -1)
+
+ if bsz is not None:
+ return tf.tile(pos_emb[:, None, :], [1, bsz, 1])
+ else:
+ return pos_emb[:, None, :]
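(Editor's note, not part of the diff.) `TFPositionalEmbedding` above builds the classic sinusoidal features: an outer product of positions with inverse frequencies, followed by `concat([sin, cos])` along the feature axis. A NumPy re-statement of that layout with a toy dimension:

```python
import numpy as np

demb = 8                                                      # embedding dimension (toy size)
inv_freq = 1.0 / (10000 ** (np.arange(0, demb, 2.0) / demb))  # shape (demb/2,)
pos_seq = np.arange(5.0)[::-1]                                # e.g. klen-1 .. 0, as in the model
sinusoid = np.einsum("i,j->ij", pos_seq, inv_freq)            # shape (5, demb/2)
pos_emb = np.concatenate([np.sin(sinusoid), np.cos(sinusoid)], axis=-1)  # shape (5, demb)
print(pos_emb.shape)  # (5, 8)
```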
+
+
+class TFPositionwiseFF(keras.layers.Layer):
+ def __init__(self, d_model, d_inner, dropout, pre_lnorm=False, layer_norm_epsilon=1e-5, init_std=0.02, **kwargs):
+ super().__init__(**kwargs)
+
+ self.d_model = d_model
+ self.d_inner = d_inner
+ self.dropout = dropout
+
+ self.layer_1 = keras.layers.Dense(
+ d_inner, kernel_initializer=get_initializer(init_std), activation=tf.nn.relu, name="CoreNet_._0"
+ )
+ self.drop_1 = keras.layers.Dropout(dropout)
+ self.layer_2 = keras.layers.Dense(d_model, kernel_initializer=get_initializer(init_std), name="CoreNet_._3")
+ self.drop_2 = keras.layers.Dropout(dropout)
+
+ self.layer_norm = keras.layers.LayerNormalization(epsilon=layer_norm_epsilon, name="layer_norm")
+
+ self.pre_lnorm = pre_lnorm
+
+ def call(self, inp, training=False):
+ if self.pre_lnorm:
+ # layer normalization + positionwise feed-forward
+ core_out = self.layer_norm(inp)
+ core_out = self.layer_1(core_out)
+ core_out = self.drop_1(core_out, training=training)
+ core_out = self.layer_2(core_out)
+ core_out = self.drop_2(core_out, training=training)
+
+ # residual connection
+ output = core_out + inp
+ else:
+ # positionwise feed-forward
+ core_out = self.layer_1(inp)
+ core_out = self.drop_1(core_out, training=training)
+ core_out = self.layer_2(core_out)
+ core_out = self.drop_2(core_out, training=training)
+
+ # residual connection + layer normalization
+ output = self.layer_norm(inp + core_out)
+
+ return output
+
+
+class TFRelPartialLearnableMultiHeadAttn(keras.layers.Layer):
+ def __init__(
+ self,
+ n_head,
+ d_model,
+ d_head,
+ dropout,
+ dropatt=0.0,
+ pre_lnorm=False,
+ r_r_bias=None,
+ r_w_bias=None,
+ layer_norm_epsilon=1e-5,
+ init_std=0.02,
+ output_attentions=False,
+ **kwargs,
+ ):
+ super().__init__(**kwargs)
+
+ self.n_head = n_head
+ self.d_model = d_model
+ self.d_head = d_head
+ self.dropout = dropout
+ self.output_attentions = output_attentions
+
+ self.qkv_net = keras.layers.Dense(
+ 3 * n_head * d_head, kernel_initializer=get_initializer(init_std), use_bias=False, name="qkv_net"
+ )
+
+ self.drop = keras.layers.Dropout(dropout)
+ self.dropatt = keras.layers.Dropout(dropatt)
+ self.o_net = keras.layers.Dense(
+ d_model, kernel_initializer=get_initializer(init_std), use_bias=False, name="o_net"
+ )
+
+ self.layer_norm = keras.layers.LayerNormalization(epsilon=layer_norm_epsilon, name="layer_norm")
+
+ self.scale = 1 / (d_head**0.5)
+
+ self.pre_lnorm = pre_lnorm
+
+ if r_r_bias is not None and r_w_bias is not None: # Biases are shared
+ self.r_r_bias = r_r_bias
+ self.r_w_bias = r_w_bias
+ else:
+ self.r_r_bias = None
+ self.r_w_bias = None
+
+ self.r_net = keras.layers.Dense(
+ self.n_head * self.d_head, kernel_initializer=get_initializer(init_std), use_bias=False, name="r_net"
+ )
+
+ def build(self, input_shape):
+ if self.r_r_bias is None or self.r_w_bias is None: # Biases are not shared
+ self.r_r_bias = self.add_weight(
+ shape=(self.n_head, self.d_head), initializer="zeros", trainable=True, name="r_r_bias"
+ )
+ self.r_w_bias = self.add_weight(
+ shape=(self.n_head, self.d_head), initializer="zeros", trainable=True, name="r_w_bias"
+ )
+ super().build(input_shape)
+
+ def _rel_shift(self, x):
+ x_size = shape_list(x)
+
+ x = tf.pad(x, [[0, 0], [1, 0], [0, 0], [0, 0]])
+ x = tf.reshape(x, [x_size[1] + 1, x_size[0], x_size[2], x_size[3]])
+ x = tf.slice(x, [1, 0, 0, 0], [-1, -1, -1, -1])
+ x = tf.reshape(x, x_size)
+
+ return x
+
+ def call(self, w, r, attn_mask, mems, head_mask, output_attentions, training=False):
+ qlen, rlen, bsz = shape_list(w)[0], shape_list(r)[0], shape_list(w)[1]
+
+ if mems is not None:
+ mems = tf.cast(mems, dtype=w.dtype)
+ cat = tf.concat([mems, w], 0)
+ if self.pre_lnorm:
+ w_heads = self.qkv_net(self.layer_norm(cat))
+ else:
+ w_heads = self.qkv_net(cat)
+ r_head_k = self.r_net(r)
+
+ w_head_q, w_head_k, w_head_v = tf.split(w_heads, 3, axis=-1)
+ w_head_q = w_head_q[-qlen:]
+ else:
+ if self.pre_lnorm:
+ w_heads = self.qkv_net(self.layer_norm(w))
+ else:
+ w_heads = self.qkv_net(w)
+ r_head_k = self.r_net(r)
+
+ w_head_q, w_head_k, w_head_v = tf.split(w_heads, 3, axis=-1)
+
+ klen = shape_list(w_head_k)[0]
+
+ w_head_q = tf.reshape(w_head_q, (qlen, bsz, self.n_head, self.d_head)) # qlen x bsz x n_head x d_head
+ w_head_k = tf.reshape(w_head_k, (klen, bsz, self.n_head, self.d_head)) # klen x bsz x n_head x d_head
+ w_head_v = tf.reshape(w_head_v, (klen, bsz, self.n_head, self.d_head)) # klen x bsz x n_head x d_head
+
+ r_head_k = tf.reshape(r_head_k, (rlen, self.n_head, self.d_head)) # rlen x n_head x d_head
+
+ # compute attention score
+ rw_head_q = w_head_q + self.r_w_bias # qlen x bsz x n_head x d_head
+ AC = tf.einsum("ibnd,jbnd->ijbn", rw_head_q, w_head_k) # qlen x klen x bsz x n_head
+
+ rr_head_q = w_head_q + self.r_r_bias
+ BD = tf.einsum("ibnd,jnd->ijbn", rr_head_q, r_head_k) # qlen x klen x bsz x n_head
+ BD = self._rel_shift(BD)
+
+ # [qlen x klen x bsz x n_head]
+ attn_score = AC + BD
+ attn_score = attn_score * self.scale
+
+ # compute attention probability
+ if attn_mask is not None:
+ attn_mask_t = attn_mask[:, :, None, None]
+ attn_mask_t = tf.cast(attn_mask_t, dtype=attn_score.dtype)
+ attn_score = attn_score * (1.0 - attn_mask_t) - 1e30 * attn_mask_t
+
+ # [qlen x klen x bsz x n_head]
+ attn_prob = stable_softmax(attn_score, axis=1)
+ attn_prob = self.dropatt(attn_prob, training=training)
+
+ # Mask heads if we want to
+ if head_mask is not None:
+ attn_prob = attn_prob * head_mask
+
+ # compute attention vector
+ attn_vec = tf.einsum("ijbn,jbnd->ibnd", attn_prob, w_head_v)
+
+ # [qlen x bsz x n_head x d_head]
+ attn_vec_sizes = shape_list(attn_vec)
+ attn_vec = tf.reshape(attn_vec, (attn_vec_sizes[0], attn_vec_sizes[1], self.n_head * self.d_head))
+
+ # linear projection
+ attn_out = self.o_net(attn_vec)
+ attn_out = self.drop(attn_out, training=training)
+
+ if self.pre_lnorm:
+ # residual connection
+ outputs = [w + attn_out]
+ else:
+ # residual connection + layer normalization
+ outputs = [self.layer_norm(w + attn_out)]
+
+ if output_attentions:
+ outputs.append(attn_prob)
+
+ return outputs
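(Editor's note, not part of the diff.) The `_rel_shift` helper above implements the standard Transformer-XL trick for turning position-indexed scores into relative-position scores via a pad / fold / drop / fold-back sequence instead of an explicit gather. A NumPy re-statement on a toy tensor:

```python
import numpy as np


def rel_shift(x: np.ndarray) -> np.ndarray:
    # Same steps as _rel_shift above: pad one step on the key axis, fold,
    # drop the first row, fold back to the original shape.
    qlen, klen, bsz, n_head = x.shape
    x = np.pad(x, [(0, 0), (1, 0), (0, 0), (0, 0)])
    x = x.reshape(klen + 1, qlen, bsz, n_head)
    x = x[1:]
    return x.reshape(qlen, klen, bsz, n_head)


bd = np.arange(3 * 4).reshape(3, 4, 1, 1).astype(float)  # toy scores, qlen=3, klen=4
print(rel_shift(bd)[..., 0, 0])
```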
+
+
+class TFRelPartialLearnableDecoderLayer(keras.layers.Layer):
+ def __init__(
+ self,
+ n_head,
+ d_model,
+ d_head,
+ d_inner,
+ dropout,
+ dropatt=0.0,
+ pre_lnorm=False,
+ r_w_bias=None,
+ r_r_bias=None,
+ layer_norm_epsilon=1e-5,
+ init_std=0.02,
+ output_attentions=False,
+ **kwargs,
+ ):
+ super().__init__(**kwargs)
+
+ self.dec_attn = TFRelPartialLearnableMultiHeadAttn(
+ n_head,
+ d_model,
+ d_head,
+ dropout,
+ dropatt=dropatt,
+ pre_lnorm=pre_lnorm,
+ r_w_bias=r_w_bias,
+ r_r_bias=r_r_bias,
+ init_std=init_std,
+ layer_norm_epsilon=layer_norm_epsilon,
+ output_attentions=output_attentions,
+ name="dec_attn",
+ )
+ self.pos_ff = TFPositionwiseFF(
+ d_model,
+ d_inner,
+ dropout,
+ pre_lnorm=pre_lnorm,
+ init_std=init_std,
+ layer_norm_epsilon=layer_norm_epsilon,
+ name="pos_ff",
+ )
+
+ def call(self, dec_inp, r, dec_attn_mask, mems, head_mask, output_attentions, training=False):
+ attn_outputs = self.dec_attn(dec_inp, r, dec_attn_mask, mems, head_mask, output_attentions, training=training)
+ ff_output = self.pos_ff(attn_outputs[0], training=training)
+
+ outputs = [ff_output] + attn_outputs[1:]
+
+ return outputs
+
+
+class TFTransfoEmbeddings(keras.layers.Layer):
+ def __init__(self, vocab_size, emb_size, init_std, **kwargs):
+ super().__init__(**kwargs)
+
+ self.vocab_size = vocab_size
+ self.emb_size = emb_size
+ self.init_std = init_std
+
+ def build(self, input_shape):
+ self.weight = self.add_weight(
+ shape=(self.vocab_size, self.emb_size),
+ initializer=get_initializer(self.init_std),
+ name="embeddings",
+ )
+
+ super().build(input_shape)
+
+ def call(self, inputs):
+ return tf.gather(self.weight, inputs)
+
+
+class TFAdaptiveEmbedding(keras.layers.Layer):
+ def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, init_std=0.02, sample_softmax=False, **kwargs):
+ super().__init__(**kwargs)
+
+ self.n_token = n_token
+ self.d_embed = d_embed
+ self.init_std = init_std
+
+ self.cutoffs = cutoffs + [n_token]
+ self.div_val = div_val
+ self.d_proj = d_proj
+
+ self.emb_scale = d_proj**0.5
+
+ self.cutoff_ends = [0] + self.cutoffs
+
+ self.emb_layers = []
+ self.emb_projs = []
+
+ if div_val == 1:
+ raise NotImplementedError # Removed these to avoid maintaining dead code - They are not used in our pretrained checkpoint
+ else:
+ for i in range(len(self.cutoffs)):
+ l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
+ d_emb_i = d_embed // (div_val**i)
+ self.emb_layers.append(
+ TFTransfoEmbeddings(
+ r_idx - l_idx,
+ d_emb_i,
+ init_std,
+ name=f"emb_layers_._{i}",
+ )
+ )
+
+ def build(self, input_shape):
+ for i in range(len(self.cutoffs)):
+ d_emb_i = self.d_embed // (self.div_val**i)
+ self.emb_projs.append(
+ self.add_weight(
+ shape=(d_emb_i, self.d_proj),
+ initializer=get_initializer(self.init_std),
+ trainable=True,
+ name=f"emb_projs_._{i}",
+ )
+ )
+
+ super().build(input_shape)
+
+ def call(self, inp):
+ if self.div_val == 1:
+ raise NotImplementedError # Removed these to avoid maintaining dead code - They are not used in our pretrained checkpoint
+ else:
+ inp_flat = tf.reshape(inp, (-1,))
+ emb_flat = tf.zeros([shape_list(inp_flat)[0], self.d_proj])
+ for i in range(len(self.cutoffs)):
+ l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
+
+ mask_i = (inp_flat >= l_idx) & (inp_flat < r_idx)
+
+ inp_i = tf.boolean_mask(inp_flat, mask_i) - l_idx
+ emb_i = self.emb_layers[i](inp_i)
+ emb_i = tf.einsum("id,de->ie", emb_i, self.emb_projs[i])
+
+ mask_idx = tf.where(mask_i)
+ scatter = tf.scatter_nd(mask_idx, emb_i, shape_list(emb_flat))
+ emb_flat = tf.cast(emb_flat, dtype=scatter.dtype)
+ emb_flat += scatter
+
+ embed_shape = shape_list(inp) + [self.d_proj]
+ embed = tf.reshape(emb_flat, embed_shape)
+
+ embed *= self.emb_scale
+
+ return embed
+
+
+@keras_serializable
+class TFTransfoXLMainLayer(keras.layers.Layer):
+ config_class = TransfoXLConfig
+
+ def __init__(self, config, **kwargs):
+ super().__init__(**kwargs)
+
+ self.config = config
+ self.output_hidden_states = config.output_hidden_states
+ self.output_attentions = config.output_attentions
+ self.return_dict = config.use_return_dict
+
+ self.n_token = config.vocab_size
+
+ self.d_embed = config.d_embed
+ self.d_model = config.d_model
+ self.n_head = config.n_head
+ self.d_head = config.d_head
+ self.untie_r = config.untie_r
+
+ self.word_emb = TFAdaptiveEmbedding(
+ config.vocab_size,
+ config.d_embed,
+ config.d_model,
+ config.cutoffs,
+ div_val=config.div_val,
+ init_std=config.init_std,
+ name="word_emb",
+ )
+
+ self.drop = keras.layers.Dropout(config.dropout)
+
+ self.n_layer = config.n_layer
+ self.mem_len = config.mem_len
+ self.attn_type = config.attn_type
+
+ self.layers = []
+ if config.attn_type == 0: # the default attention
+ for i in range(config.n_layer):
+ self.layers.append(
+ TFRelPartialLearnableDecoderLayer(
+ config.n_head,
+ config.d_model,
+ config.d_head,
+ config.d_inner,
+ config.dropout,
+ dropatt=config.dropatt,
+ pre_lnorm=config.pre_lnorm,
+ r_w_bias=None if self.untie_r else self.r_w_bias,
+ r_r_bias=None if self.untie_r else self.r_r_bias,
+ layer_norm_epsilon=config.layer_norm_epsilon,
+ init_std=config.init_std,
+ output_attentions=self.output_attentions,
+ name=f"layers_._{i}",
+ )
+ )
+ else: # learnable embeddings and absolute embeddings
+ raise NotImplementedError # Removed these to avoid maintaining dead code - They are not used in our pretrained checkpoint
+
+ self.same_length = config.same_length
+ self.clamp_len = config.clamp_len
+
+ if self.attn_type == 0: # default attention
+ self.pos_emb = TFPositionalEmbedding(self.d_model, name="pos_emb")
+ else: # learnable embeddings and absolute embeddings
+ raise NotImplementedError # Removed these to avoid maintaining dead code - They are not used in our pretrained checkpoint
+
+ def build(self, input_shape):
+ if not self.untie_r:
+ self.r_w_bias = self.add_weight(
+ shape=(self.n_head, self.d_head), initializer="zeros", trainable=True, name="r_w_bias"
+ )
+ self.r_r_bias = self.add_weight(
+ shape=(self.n_head, self.d_head), initializer="zeros", trainable=True, name="r_r_bias"
+ )
+ super().build(input_shape)
+
+ def get_input_embeddings(self):
+ return self.word_emb
+
+ def set_input_embeddings(self, value):
+ raise NotImplementedError
+
+ def backward_compatible(self):
+ self.sample_softmax = -1
+
+ def reset_memory_length(self, mem_len):
+ self.mem_len = mem_len
+
+ def _prune_heads(self, heads):
+ raise NotImplementedError
+
+ def init_mems(self, bsz):
+ if self.mem_len > 0:
+ mems = []
+ for i in range(self.n_layer):
+ empty = tf.zeros([self.mem_len, bsz, self.d_model])
+ mems.append(empty)
+
+ return mems
+ else:
+ return None
+
+ def _update_mems(self, hids, mems, mlen, qlen):
+ # does not deal with None
+ if mems is None:
+ return None
+
+ # mems is not None
+ assert len(hids) == len(mems), "len(hids) != len(mems)"
+
+ # There are `mlen + qlen` steps that can be cached into mems
+ new_mems = []
+ end_idx = mlen + tf.math.maximum(0, qlen)
+ beg_idx = tf.math.maximum(0, end_idx - tf.convert_to_tensor(self.mem_len))
+ for i in range(len(hids)):
+ mems[i] = tf.cast(mems[i], dtype=hids[i].dtype)
+ cat = tf.concat([mems[i], hids[i]], axis=0)
+ tf.stop_gradient(cat)
+ new_mems.append(cat[beg_idx:end_idx])
+
+ return new_mems
+
+ @unpack_inputs
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ mems: List[tf.Tensor] | None = None,
+ head_mask: np.ndarray | tf.Tensor | None = None,
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ labels: np.ndarray | tf.Tensor | None = None,
+ training: bool = False,
+ ):
+ # the original code for Transformer-XL used shapes [len, bsz] but we want a unified interface in the library
+ # so we transpose here from shape [bsz, len] to shape [len, bsz]
+ if input_ids is not None and inputs_embeds is not None:
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
+ elif input_ids is not None:
+ input_ids = tf.transpose(input_ids, perm=(1, 0))
+ qlen, bsz = shape_list(input_ids)
+ elif inputs_embeds is not None:
+ inputs_embeds = tf.transpose(inputs_embeds, perm=(1, 0, 2))
+ qlen, bsz = shape_list(inputs_embeds)[:2]
+ else:
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
+
+ if mems is None:
+ mems = self.init_mems(bsz)
+
+ # Prepare head mask if needed
+ # 1.0 in head_mask indicate we keep the head
+ # attention_probs has shape bsz x n_heads x N x N
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] (a head_mask for each layer)
+ # and head_mask is converted to shape [num_hidden_layers x qlen x klen x bsz x n_head]
+ if head_mask is not None:
+ raise NotImplementedError
+ else:
+ head_mask = [None] * self.n_layer
+
+ if inputs_embeds is not None:
+ word_emb = inputs_embeds
+ else:
+ word_emb = self.word_emb(input_ids)
+
+ mlen = shape_list(mems[0])[0] if mems is not None else 0
+ klen = mlen + qlen
+
+ # Compute decoder attention mask
+ all_ones = tf.ones([qlen, klen], dtype=tf.int32)
+ upper_mask = 1 - tf.linalg.band_part(tf.ones([qlen, klen], dtype=tf.int32), -1, mlen)
+ if self.same_length:
+ mask_len = klen - self.mem_len
+ mask_shift_len = qlen - tf.nn.relu(mask_len) # Lazy clamping of negatives to zero
+
+ # Use an indicator variable instead of a conditional to keep the compiler happy
+ lower_mask = tf.linalg.band_part(all_ones, -1, 0) - (
+ tf.linalg.band_part(all_ones, mask_shift_len - 1, 0) * tf.cast(mask_shift_len != 0, tf.int32)
+ )
+ dec_attn_mask = upper_mask + lower_mask
+ else:
+ dec_attn_mask = upper_mask
+
+ hids = []
+ attentions = [] if output_attentions else None
+ if self.attn_type == 0: # default
+ pos_seq = tf.range(klen - 1, -1, -1.0)
+ if self.clamp_len > 0:
+ pos_seq = tf.minimum(pos_seq, self.clamp_len)
+ pos_emb = self.pos_emb(pos_seq)
+
+ core_out = self.drop(word_emb, training=training)
+ pos_emb = self.drop(pos_emb, training=training)
+
+ for i, layer in enumerate(self.layers):
+ hids.append(core_out)
+ mems_i = None if mems is None else mems[i]
+ layer_outputs = layer(
+ core_out,
+ pos_emb,
+ dec_attn_mask,
+ mems_i,
+ head_mask[i],
+ output_attentions,
+ training=training,
+ )
+ core_out = layer_outputs[0]
+ if output_attentions:
+ attentions.append(layer_outputs[1])
+ else: # learnable embeddings and absolute embeddings
+ raise NotImplementedError # Removed these to avoid maintaining dead code - They are not used in our pretrained checkpoint
+
+ core_out = self.drop(core_out, training=training)
+
+ new_mems = self._update_mems(hids, mems, mlen, qlen)
+
+ # We transpose back here to shape [bsz, len, hidden_dim]
+ core_out = tf.transpose(core_out, perm=(1, 0, 2))
+
+ if output_hidden_states:
+ # Transpose to library standard shape [bsz, len, hidden_dim] and add last layer
+ hids = tuple(tf.transpose(t, perm=(1, 0, 2)) for t in hids)
+ hids = hids + (core_out,)
+ else:
+ hids = None
+ if output_attentions:
+ # Transpose to library standard shape [bsz, n_heads, query_seq_len, key_seq_len]
+ attentions = tuple(tf.transpose(t, perm=(2, 3, 0, 1)) for t in attentions)
+
+ if not return_dict:
+ return tuple(v for v in [core_out, new_mems, hids, attentions] if v is not None)
+
+ return TFTransfoXLModelOutput(
+ last_hidden_state=core_out,
+ mems=new_mems,
+ hidden_states=hids,
+ attentions=attentions,
+ )
+
+
+class TFTransfoXLPreTrainedModel(TFPreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = TransfoXLConfig
+ base_model_prefix = "transformer"
+
+
+@dataclass
+class TFTransfoXLModelOutput(ModelOutput):
+ """
+ Base class for model's outputs that may also contain a past key/values (to speed up sequential decoding).
+
+ Args:
+ last_hidden_state (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`):
+ Sequence of hidden-states at the output of the last layer of the model.
+ mems (`List[tf.Tensor]` of length `config.n_layers`):
+ Contains pre-computed hidden-states (key and values in the attention blocks). Can be used (see `mems`
+ input) to speed up sequential decoding. The token ids which have their past given to this model should not
+ be passed as input ids as they have already been computed.
+ hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape
+ `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
+ attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ """
+
+ last_hidden_state: tf.Tensor = None
+ mems: List[tf.Tensor] = None
+ hidden_states: Tuple[tf.Tensor] | None = None
+ attentions: Tuple[tf.Tensor] | None = None
+
+
+@dataclass
+class TFTransfoXLLMHeadModelOutput(ModelOutput):
+ """
+ Base class for model's outputs that may also contain a past key/values (to speed up sequential decoding).
+
+ Args:
+ losses (`tf.Tensor` of shape `(batch_size, sequence_length-1)`, *optional*, returned when `labels` is provided):
+ Language modeling losses (not reduced).
+ prediction_scores (`tf.Tensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
+ Prediction scores of the language modeling head (scores for each vocabulary token after SoftMax).
+ mems (`List[tf.Tensor]` of length `config.n_layers`):
+ Contains pre-computed hidden-states (key and values in the attention blocks). Can be used (see `mems`
+ input) to speed up sequential decoding. The token ids which have their past given to this model should not
+ be passed as input ids as they have already been computed.
+ hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape
+ `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
+ attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ """
+
+ prediction_scores: tf.Tensor = None
+ mems: List[tf.Tensor] = None
+ hidden_states: Tuple[tf.Tensor] | None = None
+ attentions: Tuple[tf.Tensor] | None = None
+
+
+@dataclass
+class TFTransfoXLSequenceClassifierOutputWithPast(ModelOutput):
+ """
+ Base class for outputs of sentence classification models.
+
+ Args:
+ loss (`tf.Tensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Classification (or regression if config.num_labels==1) loss.
+ logits (`tf.Tensor` of shape `(batch_size, config.num_labels)`):
+ Classification (or regression if config.num_labels==1) scores (before SoftMax).
+ mems (`List[tf.Tensor]` of length `config.n_layers`):
+ Contains pre-computed hidden-states (key and values in the attention blocks). Can be used (see `mems`
+ input) to speed up sequential decoding. The token ids which have their past given to this model should not
+ be passed as input ids as they have already been computed.
+ hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape
+ `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
+ attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ """
+
+ loss: tf.Tensor | None = None
+ logits: tf.Tensor = None
+ mems: List[tf.Tensor] = None
+ hidden_states: Tuple[tf.Tensor] | None = None
+ attentions: Tuple[tf.Tensor] | None = None
+
+
+TRANSFO_XL_START_DOCSTRING = r"""
+
+ This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
+ etc.)
+
+ This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
+ as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and
+ behavior.
+
+
+
+ TensorFlow models and layers in `transformers` accept two formats as input:
+
+ - having all inputs as keyword arguments (like PyTorch models), or
+ - having all inputs as a list, tuple or dict in the first positional argument.
+
+ The reason the second format is supported is that Keras methods prefer this format when passing inputs to models
+ and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just
+ pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second
+ format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with
+ the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first
+ positional argument:
+
+ - a single Tensor with `input_ids` only and nothing else: `model(input_ids)`
+ - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
+ `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
+ - a dictionary with one or several input Tensors associated to the input names given in the docstring:
+ `model({"input_ids": input_ids, "token_type_ids": token_type_ids})`
+
+ Note that when creating models and layers with
+ [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry
+ about any of this, as you can just pass inputs like you would to any other Python function!
+
+
+
+ Parameters:
+ config ([`TransfoXLConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+TRANSFO_XL_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`tf.Tensor` or `Numpy array` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and
+ [`PreTrainedTokenizer.encode`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ mems (`List[tf.Tensor]` of length `config.n_layers`):
+ Contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model (see
+ `mems` output below). Can be used to speed up sequential decoding. The token ids which have their mems
+ given to this model should not be passed as `input_ids` as they have already been computed.
+ head_mask (`tf.Tensor` or `Numpy array` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+ inputs_embeds (`tf.Tensor` or `Numpy array` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
+ model's internal embedding lookup matrix.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the
+ config will be used instead.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
+ used instead.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in
+ eager mode, in graph mode the value will always be set to True.
+ training (`bool`, *optional*, defaults to `False`):
+ Whether or not to use the model in training mode (some modules like dropout modules have different
+ behaviors between training and evaluation).
+"""
+
+
+@add_start_docstrings(
+ "The bare Bert Model transformer outputting raw hidden-states without any specific head on top.",
+ TRANSFO_XL_START_DOCSTRING,
+)
+class TFTransfoXLModel(TFTransfoXLPreTrainedModel):
+ def __init__(self, config, *inputs, **kwargs):
+ super().__init__(config, *inputs, **kwargs)
+ self.transformer = TFTransfoXLMainLayer(config, name="transformer")
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(TRANSFO_XL_INPUTS_DOCSTRING)
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=TFTransfoXLModelOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ mems: List[tf.Tensor] | None = None,
+ head_mask: np.ndarray | tf.Tensor | None = None,
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
+ output_attentions: bool | None = None,
+ output_hidden_states: bool | None = None,
+ return_dict: bool | None = None,
+ training: bool = False,
+ ) -> TFTransfoXLModelOutput | Tuple[tf.Tensor]:
+ outputs = self.transformer(
+ input_ids=input_ids,
+ mems=mems,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+
+ return outputs
+
+
+@add_start_docstrings(
+ """
+ The Transformer-XL Model with a language modeling head on top (adaptive softmax with weights tied to the adaptive
+ input embeddings)
+ """,
+ TRANSFO_XL_START_DOCSTRING,
+)
+class TFTransfoXLLMHeadModel(TFTransfoXLPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+ self.transformer = TFTransfoXLMainLayer(config, name="transformer")
+ self.sample_softmax = config.sample_softmax
+ assert self.sample_softmax <= 0, (
+ "Sampling from the softmax is not implemented yet. Please look at issue: #3310:"
+ " https://github.com/huggingface/transformers/issues/3310"
+ )
+
+ self.crit = TFAdaptiveSoftmaxMask(
+ config.vocab_size, config.d_embed, config.d_model, config.cutoffs, div_val=config.div_val, name="crit"
+ )
+
+ def _resize_token_embeddings(self, new_num_tokens):
+ raise NotImplementedError()
+
+ def get_output_embeddings(self):
+ """Double-check if you are using adaptive softmax."""
+ if len(self.crit.out_layers) > 0:
+ return self.crit.out_layers[-1]
+ return None
+
+ def reset_memory_length(self, mem_len):
+ self.transformer.reset_memory_length(mem_len)
+
+ def init_mems(self, bsz):
+ return self.transformer.init_mems(bsz)
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(TRANSFO_XL_INPUTS_DOCSTRING)
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=TFTransfoXLLMHeadModelOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ mems: List[tf.Tensor] | None = None,
+ head_mask: np.ndarray | tf.Tensor | None = None,
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
+ output_attentions: bool | None = None,
+ output_hidden_states: bool | None = None,
+ return_dict: bool | None = None,
+ labels: np.ndarray | tf.Tensor | None = None,
+ training: bool = False,
+ ) -> TFTransfoXLLMHeadModelOutput | Tuple[tf.Tensor]:
+ if input_ids is not None:
+ bsz, tgt_len = shape_list(input_ids)[:2]
+ else:
+ bsz, tgt_len = shape_list(inputs_embeds)[:2]
+
+ transformer_outputs = self.transformer(
+ input_ids,
+ mems,
+ head_mask,
+ inputs_embeds,
+ output_attentions,
+ output_hidden_states,
+ return_dict,
+ training=training,
+ )
+
+ last_hidden = transformer_outputs[0]
+ pred_hid = last_hidden[:, -tgt_len:]
+
+ softmax_output = self.crit(pred_hid, labels, training=training)
+ prediction_scores = softmax_output if labels is None else ()
+
+ if not return_dict:
+ return (prediction_scores,) + transformer_outputs[1:]
+
+ return TFTransfoXLLMHeadModelOutput(
+ prediction_scores=prediction_scores,
+ mems=transformer_outputs.mems,
+ hidden_states=transformer_outputs.hidden_states,
+ attentions=transformer_outputs.attentions,
+ )
+
+ def prepare_inputs_for_generation(self, input_ids, past_key_values=None, **model_kwargs):
+ inputs = {}
+
+ # if past is defined in model kwargs then use it for faster decoding
+ if past_key_values:
+ input_ids = tf.expand_dims(input_ids[:, -1], axis=-1)
+ else:
+ input_ids = input_ids
+
+ return inputs
+
+ # Adapted from the torch tie_weights function
+ def tf_to_pt_weight_rename(self, tf_weight):
+ if self.config.tie_word_embeddings and "crit.out_layers" in tf_weight:
+ return tf_weight, tf_weight.replace("crit.out_layers", "transformer.word_emb.emb_layers")
+ elif self.config.tie_projs and "crit.out_projs" in tf_weight:
+ for i, tie_proj in enumerate(self.config.tie_projs):
+ if tie_proj and self.config.div_val == 1 and self.config.d_model != self.config.d_embed:
+ # self.crit.out_projs[i] = self.transformer.word_emb.emb_projs[0]
+ return tf_weight, tf_weight.replace(f"crit.out_projs.{i}", "transformer.word_emb.emb_projs.0")
+ elif tie_proj and self.config.div_val != 1:
+ # self.crit.out_projs[i] = self.transformer.word_emb.emb_projs[i]
+ return tf_weight, tf_weight.replace("crit.out_projs", "transformer.word_emb.emb_projs")
+ else:
+ return (tf_weight,)
+
+
+@add_start_docstrings(
+ """
+ The Transformer-XL Model with a sequence classification head on top (linear layer).
+
+ [`TFTransfoXLForSequenceClassification`] uses the last token in order to do the classification, as other causal
+ models (e.g. GPT-1, GPT-2) do.
+
+ Since it does classification on the last token, it requires to know the position of the last token. If a
+ `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
+ no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
+ padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
+ each row of the batch).
+ """,
+ TRANSFO_XL_START_DOCSTRING,
+)
+class TFTransfoXLForSequenceClassification(TFTransfoXLPreTrainedModel, TFSequenceClassificationLoss):
+ def __init__(self, config, *inputs, **kwargs):
+ super().__init__(config, *inputs, **kwargs)
+ self.num_labels = config.num_labels
+ self.score = keras.layers.Dense(
+ config.num_labels,
+ kernel_initializer=get_initializer(config.init_range),
+ name="score",
+ use_bias=False,
+ )
+ self.transformer = TFTransfoXLMainLayer(config, name="transformer")
+
+ def get_output_embeddings(self):
+ # Remove after transformers v4.32. Fix this model's `test_model_common_attributes` test too.
+ logger.warning(
+ "Sequence classification models do not have output embeddings. `.get_output_embeddings` will be removed "
+ "in transformers v4.32."
+ )
+ return self.transformer.word_emb
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(TRANSFO_XL_INPUTS_DOCSTRING)
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=TFTransfoXLSequenceClassifierOutputWithPast,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ mems: List[tf.Tensor] | None = None,
+ head_mask: np.ndarray | tf.Tensor | None = None,
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ labels: np.ndarray | tf.Tensor | None = None,
+ training: Optional[bool] = False,
+ ) -> Union[Tuple, TFTransfoXLSequenceClassifierOutputWithPast]:
+ r"""
+ labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the cross entropy classification loss. Indices should be in `[0, ...,
+ config.num_labels - 1]`.
+ """
+ transformer_outputs = self.transformer(
+ input_ids=input_ids,
+ mems=mems,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+
+ hidden_states = transformer_outputs[0]
+ logits = self.score(hidden_states)
+ in_logits = None
+ if self.config.pad_token_id is None:
+ sequence_lengths = -1
+ else:
+ if input_ids is not None:
+ sequence_lengths = (
+ tf.argmax(tf.cast(tf.math.equal(input_ids, self.config.pad_token_id), input_ids.dtype), axis=-1)
+ - 1
+ )
+ sequence_lengths = tf.where(sequence_lengths >= 0, sequence_lengths, input_ids.shape[-1] - 1)
+ in_logits = tf.gather(logits, sequence_lengths, batch_dims=1, axis=1)
+ else:
+ sequence_lengths = -1
+ logger.warning(
+ f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be "
+ "unexpected if using padding tokens in conjunction with `inputs_embeds.`"
+ )
+ loss = None
+
+ if labels is not None:
+ if input_ids is not None:
+ batch_size, sequence_length = shape_list(input_ids)[:2]
+ else:
+ batch_size, sequence_length = shape_list(inputs_embeds)[:2]
+ assert (
+ self.config.pad_token_id is not None or batch_size == 1
+ ), "Cannot handle batch sizes > 1 if no padding token is defined."
+
+ if not tf.is_tensor(sequence_lengths):
+ in_logits = logits[0:batch_size, sequence_lengths]
+
+ loss = self.hf_compute_loss(tf.reshape(labels, [-1, 1]), tf.reshape(in_logits, [-1, self.num_labels]))
+
+ pooled_logits = in_logits if in_logits is not None else logits
+
+ if not return_dict:
+ output = (pooled_logits,) + transformer_outputs[1:]
+ return ((loss,) + output) if loss is not None else output
+
+ return TFTransfoXLSequenceClassifierOutputWithPast(
+ loss=loss,
+ logits=pooled_logits,
+ mems=transformer_outputs.mems,
+ hidden_states=transformer_outputs.hidden_states,
+ attentions=transformer_outputs.attentions,
+ )
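+
+
+# Illustrative sketch (not part of the original library code): the sequence-classification head above
+# locates the last non-padding token with an argmax-over-equality trick. A minimal, hedged example of
+# that indexing logic, assuming a pad_token_id of 0:
+def _sketch_last_non_pad_index():
+    input_ids = tf.constant([[5, 6, 7, 0, 0], [3, 4, 8, 9, 2]])  # second row has no padding
+    # position of the first padding token in each row (0 if there is none)
+    first_pad = tf.argmax(tf.cast(tf.math.equal(input_ids, 0), input_ids.dtype), axis=-1)
+    last_token = first_pad - 1  # index of the last real token
+    # rows without padding fall back to the final position, mirroring the `tf.where` in `call` above
+    last_token = tf.where(last_token >= 0, last_token, input_ids.shape[-1] - 1)
+    # one logit vector per row is then gathered with `tf.gather(logits, last_token, batch_dims=1, axis=1)`
+    return last_token  # -> [2, 4]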
diff --git a/venv/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/modeling_tf_transfo_xl_utilities.py b/venv/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/modeling_tf_transfo_xl_utilities.py
new file mode 100644
index 0000000000000000000000000000000000000000..ed1488d5595cb8f36eb540992fb4ca46534a60fb
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/modeling_tf_transfo_xl_utilities.py
@@ -0,0 +1,179 @@
+# coding=utf-8
+# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
+# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+ A TF 2.0 Adaptive Softmax for the Transformer XL model.
+"""
+
+
+import tensorflow as tf
+
+from ....modeling_tf_utils import keras
+from ....tf_utils import shape_list
+
+
+class TFAdaptiveSoftmaxMask(keras.layers.Layer):
+ def __init__(self, vocab_size, d_embed, d_proj, cutoffs, div_val=1, keep_order=False, **kwargs):
+ super().__init__(**kwargs)
+
+ self.vocab_size = vocab_size
+ self.d_embed = d_embed
+ self.d_proj = d_proj
+
+ self.cutoffs = cutoffs + [vocab_size]
+ self.cutoff_ends = [0] + self.cutoffs
+ self.div_val = div_val
+
+ self.shortlist_size = self.cutoffs[0]
+ self.n_clusters = len(self.cutoffs) - 1
+ self.head_size = self.shortlist_size + self.n_clusters
+ self.keep_order = keep_order
+
+ self.out_layers = []
+ self.out_projs = []
+
+ def build(self, input_shape):
+ if self.n_clusters > 0:
+ self.cluster_weight = self.add_weight(
+ shape=(self.n_clusters, self.d_embed), initializer="zeros", trainable=True, name="cluster_weight"
+ )
+ self.cluster_bias = self.add_weight(
+ shape=(self.n_clusters,), initializer="zeros", trainable=True, name="cluster_bias"
+ )
+
+ if self.div_val == 1:
+ for i in range(len(self.cutoffs)):
+ if self.d_proj != self.d_embed:
+ weight = self.add_weight(
+ shape=(self.d_embed, self.d_proj),
+ initializer="zeros",
+ trainable=True,
+ name=f"out_projs_._{i}",
+ )
+ self.out_projs.append(weight)
+ else:
+ self.out_projs.append(None)
+ weight = self.add_weight(
+ shape=(self.vocab_size, self.d_embed),
+ initializer="zeros",
+ trainable=True,
+ name=f"out_layers_._{i}_._weight",
+ )
+ bias = self.add_weight(
+ shape=(self.vocab_size,),
+ initializer="zeros",
+ trainable=True,
+ name=f"out_layers_._{i}_._bias",
+ )
+ self.out_layers.append((weight, bias))
+ else:
+ for i in range(len(self.cutoffs)):
+ l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
+ d_emb_i = self.d_embed // (self.div_val**i)
+
+ weight = self.add_weight(
+ shape=(d_emb_i, self.d_proj), initializer="zeros", trainable=True, name=f"out_projs_._{i}"
+ )
+ self.out_projs.append(weight)
+ weight = self.add_weight(
+ shape=(r_idx - l_idx, d_emb_i),
+ initializer="zeros",
+ trainable=True,
+ name=f"out_layers_._{i}_._weight",
+ )
+ bias = self.add_weight(
+ shape=(r_idx - l_idx,),
+ initializer="zeros",
+ trainable=True,
+ name=f"out_layers_._{i}_._bias",
+ )
+ self.out_layers.append((weight, bias))
+ super().build(input_shape)
+
+ @staticmethod
+ def _logit(x, W, b, proj=None):
+ y = x
+ if proj is not None:
+ y = tf.einsum("ibd,ed->ibe", y, proj)
+ return tf.einsum("ibd,nd->ibn", y, W) + b
+
+ @staticmethod
+ def _gather_logprob(logprob, target):
+ lp_size = shape_list(logprob)
+ r = tf.range(lp_size[0], dtype=target.dtype)
+ idx = tf.stack([r, target], 1)
+ return tf.gather_nd(logprob, idx)
+
+ def call(self, hidden, target, return_mean=True, training=False):
+ head_logprob = 0
+ if self.n_clusters == 0:
+ output = self._logit(hidden, self.out_layers[0][0], self.out_layers[0][1], self.out_projs[0])
+ if target is not None:
+ loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=target, logits=output)
+ out = tf.nn.log_softmax(output, axis=-1)
+ else:
+ hidden_sizes = shape_list(hidden)
+ out = []
+ loss = tf.zeros(hidden_sizes[:2])
+ for i in range(len(self.cutoffs)):
+ l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
+ if target is not None:
+ mask = (target >= l_idx) & (target < r_idx)
+ mask_idx = tf.where(mask)
+ cur_target = tf.boolean_mask(target, mask) - l_idx
+
+ if self.div_val == 1:
+ cur_W = self.out_layers[0][0][l_idx:r_idx]
+ cur_b = self.out_layers[0][1][l_idx:r_idx]
+ else:
+ cur_W = self.out_layers[i][0]
+ cur_b = self.out_layers[i][1]
+
+ if i == 0:
+ cur_W = tf.concat([cur_W, self.cluster_weight], 0)
+ cur_b = tf.concat([cur_b, self.cluster_bias], 0)
+
+ head_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[0])
+ head_logprob = tf.nn.log_softmax(head_logit)
+ out.append(head_logprob[..., : self.cutoffs[0]])
+ if target is not None:
+ cur_head_logprob = tf.boolean_mask(head_logprob, mask)
+ cur_logprob = self._gather_logprob(cur_head_logprob, cur_target)
+ else:
+ tail_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[i])
+ tail_logprob = tf.nn.log_softmax(tail_logit)
+ cluster_prob_idx = self.cutoffs[0] + i - 1 # No probability for the head cluster
+ logprob_i = head_logprob[..., cluster_prob_idx, None] + tail_logprob
+ out.append(logprob_i)
+ if target is not None:
+ cur_head_logprob = tf.boolean_mask(head_logprob, mask)
+ cur_tail_logprob = tf.boolean_mask(tail_logprob, mask)
+ cur_logprob = self._gather_logprob(cur_tail_logprob, cur_target)
+ cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
+ if target is not None:
+ loss += tf.scatter_nd(mask_idx, -cur_logprob, shape_list(loss))
+ out = tf.concat(out, axis=-1)
+
+ if target is not None:
+ if return_mean:
+ loss = tf.reduce_mean(loss)
+ # Add the training-time loss value to the layer using `self.add_loss()`.
+ self.add_loss(loss)
+
+ # Log the loss as a metric (we could log arbitrary metrics,
+            # including different metrics for training and inference).
+ self.add_metric(loss, name=self.name, aggregation="mean" if return_mean else "")
+
+ return out
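+
+
+# Illustrative sketch (not part of the original library code): how the adaptive softmax above splits the
+# vocabulary into a head cluster plus tail clusters whose embedding sizes shrink by `div_val` per cluster.
+# The vocab size, cutoffs and div_val below are example values only (a WT103-style setup).
+def _sketch_cluster_partition(vocab_size=267735, d_embed=1024, cutoffs=(20000, 40000, 200000), div_val=4):
+    cutoffs = list(cutoffs) + [vocab_size]
+    cutoff_ends = [0] + cutoffs
+    clusters = []
+    for i in range(len(cutoffs)):
+        l_idx, r_idx = cutoff_ends[i], cutoff_ends[i + 1]
+        d_emb_i = d_embed // (div_val**i)  # tail clusters use progressively smaller projections
+        clusters.append((l_idx, r_idx, d_emb_i))
+    # -> [(0, 20000, 1024), (20000, 40000, 256), (40000, 200000, 64), (200000, 267735, 16)]
+    return clusters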
diff --git a/venv/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/modeling_transfo_xl.py b/venv/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/modeling_transfo_xl.py
new file mode 100644
index 0000000000000000000000000000000000000000..897a3899c74cbd84713d96e3dad90cce2411dd17
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/modeling_transfo_xl.py
@@ -0,0 +1,1295 @@
+# coding=utf-8
+# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
+# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+ PyTorch Transformer XL model. Adapted from https://github.com/kimiyoung/transformer-xl. In particular
+ https://github.com/kimiyoung/transformer-xl/blob/master/pytorch/mem_transformer.py
+"""
+import warnings
+from dataclasses import dataclass
+from typing import List, Optional, Tuple, Union
+
+import torch
+from torch import nn
+from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
+
+from ....modeling_utils import PreTrainedModel
+from ....utils import (
+ ModelOutput,
+ add_code_sample_docstrings,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ logging,
+)
+from .configuration_transfo_xl import TransfoXLConfig
+from .modeling_transfo_xl_utilities import ProjectedAdaptiveLogSoftmax
+
+
+logger = logging.get_logger(__name__)
+
+_CHECKPOINT_FOR_DOC = "transfo-xl/transfo-xl-wt103"
+_CONFIG_FOR_DOC = "TransfoXLConfig"
+
+
+from .._archive_maps import TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
+
+
+def build_tf_to_pytorch_map(model, config):
+ """
+    A map of modules from TF to PyTorch. A map is used here to keep the PyTorch model as close to the original
+    Transformer-XL PyTorch implementation as possible.
+ """
+ tf_to_pt_map = {}
+
+ if hasattr(model, "transformer"):
+ # We are loading in a TransfoXLLMHeadModel => we will load also the Adaptive Softmax
+ tf_to_pt_map.update(
+ {
+ "transformer/adaptive_softmax/cutoff_0/cluster_W": model.crit.cluster_weight,
+ "transformer/adaptive_softmax/cutoff_0/cluster_b": model.crit.cluster_bias,
+ }
+ )
+ for i, (out_l, proj_l, tie_proj) in enumerate(
+ zip(model.crit.out_layers, model.crit.out_projs, config.tie_projs)
+ ):
+ layer_str = f"transformer/adaptive_softmax/cutoff_{i}/"
+ if config.tie_word_embeddings:
+ tf_to_pt_map.update({layer_str + "b": out_l.bias})
+ else:
+ raise NotImplementedError
+ # I don't think this is implemented in the TF code
+ tf_to_pt_map.update({layer_str + "lookup_table": out_l.weight, layer_str + "b": out_l.bias})
+ if not tie_proj:
+ tf_to_pt_map.update({layer_str + "proj": proj_l})
+ # Now load the rest of the transformer
+ model = model.transformer
+
+ # Embeddings
+ for i, (embed_l, proj_l) in enumerate(zip(model.word_emb.emb_layers, model.word_emb.emb_projs)):
+ layer_str = f"transformer/adaptive_embed/cutoff_{i}/"
+ tf_to_pt_map.update({layer_str + "lookup_table": embed_l.weight, layer_str + "proj_W": proj_l})
+
+ # Transformer blocks
+ for i, b in enumerate(model.layers):
+ layer_str = f"transformer/layer_{i}/"
+ tf_to_pt_map.update(
+ {
+ layer_str + "rel_attn/LayerNorm/gamma": b.dec_attn.layer_norm.weight,
+ layer_str + "rel_attn/LayerNorm/beta": b.dec_attn.layer_norm.bias,
+ layer_str + "rel_attn/o/kernel": b.dec_attn.o_net.weight,
+ layer_str + "rel_attn/qkv/kernel": b.dec_attn.qkv_net.weight,
+ layer_str + "rel_attn/r/kernel": b.dec_attn.r_net.weight,
+ layer_str + "ff/LayerNorm/gamma": b.pos_ff.layer_norm.weight,
+ layer_str + "ff/LayerNorm/beta": b.pos_ff.layer_norm.bias,
+ layer_str + "ff/layer_1/kernel": b.pos_ff.CoreNet[0].weight,
+ layer_str + "ff/layer_1/bias": b.pos_ff.CoreNet[0].bias,
+ layer_str + "ff/layer_2/kernel": b.pos_ff.CoreNet[3].weight,
+ layer_str + "ff/layer_2/bias": b.pos_ff.CoreNet[3].bias,
+ }
+ )
+
+ # Relative positioning biases
+ if config.untie_r:
+ r_r_list = []
+ r_w_list = []
+ for b in model.layers:
+ r_r_list.append(b.dec_attn.r_r_bias)
+ r_w_list.append(b.dec_attn.r_w_bias)
+ else:
+ r_r_list = [model.r_r_bias]
+ r_w_list = [model.r_w_bias]
+ tf_to_pt_map.update({"transformer/r_r_bias": r_r_list, "transformer/r_w_bias": r_w_list})
+ return tf_to_pt_map
+
+
+def load_tf_weights_in_transfo_xl(model, config, tf_path):
+ """Load tf checkpoints in a pytorch model"""
+ try:
+ import numpy as np
+ import tensorflow as tf
+ except ImportError:
+ logger.error(
+ "Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
+ "https://www.tensorflow.org/install/ for installation instructions."
+ )
+ raise
+ # Build TF to PyTorch weights loading map
+ tf_to_pt_map = build_tf_to_pytorch_map(model, config)
+
+ # Load weights from TF model
+ init_vars = tf.train.list_variables(tf_path)
+ tf_weights = {}
+ for name, shape in init_vars:
+ logger.info(f"Loading TF weight {name} with shape {shape}")
+ array = tf.train.load_variable(tf_path, name)
+ tf_weights[name] = array
+
+ for name, pointer in tf_to_pt_map.items():
+ assert name in tf_weights
+ array = tf_weights[name]
+        # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v,
+        # which are not required for using the pretrained model
+ if "kernel" in name or "proj" in name:
+ array = np.transpose(array)
+ if ("r_r_bias" in name or "r_w_bias" in name) and len(pointer) > 1:
+ # Here we will split the TF weights
+ assert len(pointer) == array.shape[0]
+ for i, p_i in enumerate(pointer):
+ arr_i = array[i, ...]
+ try:
+ assert p_i.shape == arr_i.shape
+ except AssertionError as e:
+ e.args += (p_i.shape, arr_i.shape)
+ raise
+ logger.info(f"Initialize PyTorch weight {name} for layer {i}")
+ p_i.data = torch.from_numpy(arr_i)
+ else:
+ try:
+ assert (
+ pointer.shape == array.shape
+ ), f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched"
+ except AssertionError as e:
+ e.args += (pointer.shape, array.shape)
+ raise
+ logger.info(f"Initialize PyTorch weight {name}")
+ pointer.data = torch.from_numpy(array)
+ tf_weights.pop(name, None)
+ tf_weights.pop(name + "/Adam", None)
+ tf_weights.pop(name + "/Adam_1", None)
+
+ logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}")
+ return model
+
+
+class PositionalEmbedding(nn.Module):
+ def __init__(self, demb):
+ super().__init__()
+
+ self.demb = demb
+
+ inv_freq = 1 / (10000 ** (torch.arange(0.0, demb, 2.0) / demb))
+ self.register_buffer("inv_freq", inv_freq)
+
+ def forward(self, pos_seq, bsz=None):
+ sinusoid_inp = torch.outer(pos_seq, self.inv_freq)
+ pos_emb = torch.cat([sinusoid_inp.sin(), sinusoid_inp.cos()], dim=-1)
+
+ if bsz is not None:
+ return pos_emb[:, None, :].expand(-1, bsz, -1)
+ else:
+ return pos_emb[:, None, :]
+
+
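+# Illustrative sketch (not part of the original library code) of what PositionalEmbedding above computes
+# for a tiny case: relative positions counted backwards from klen - 1 to 0, each mapped to concatenated
+# sin/cos features. The sizes here are arbitrary example values.
+def _sketch_positional_embedding(demb=8, klen=4):
+    inv_freq = 1 / (10000 ** (torch.arange(0.0, demb, 2.0) / demb))
+    pos_seq = torch.arange(klen - 1, -1, -1.0)  # [3., 2., 1., 0.]
+    sinusoid_inp = torch.outer(pos_seq, inv_freq)
+    return torch.cat([sinusoid_inp.sin(), sinusoid_inp.cos()], dim=-1)  # shape [klen, demb]
+
+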
+class PositionwiseFF(nn.Module):
+ def __init__(self, d_model, d_inner, dropout, pre_lnorm=False, layer_norm_epsilon=1e-5):
+ super().__init__()
+
+ self.d_model = d_model
+ self.d_inner = d_inner
+ self.dropout = dropout
+
+ self.CoreNet = nn.Sequential(
+ nn.Linear(d_model, d_inner),
+ nn.ReLU(inplace=True),
+ nn.Dropout(dropout),
+ nn.Linear(d_inner, d_model),
+ nn.Dropout(dropout),
+ )
+
+ self.layer_norm = nn.LayerNorm(d_model, eps=layer_norm_epsilon)
+
+ self.pre_lnorm = pre_lnorm
+
+ def forward(self, inp):
+ if self.pre_lnorm:
+ # layer normalization + positionwise feed-forward
+ core_out = self.CoreNet(self.layer_norm(inp))
+
+ # residual connection
+ output = core_out + inp
+ else:
+ # positionwise feed-forward
+ core_out = self.CoreNet(inp)
+
+ # residual connection + layer normalization
+ output = self.layer_norm(inp + core_out)
+
+ return output
+
+
+class RelPartialLearnableMultiHeadAttn(nn.Module):
+ def __init__(
+ self,
+ n_head,
+ d_model,
+ d_head,
+ dropout,
+ dropatt=0,
+ pre_lnorm=False,
+ r_r_bias=None,
+ r_w_bias=None,
+ layer_norm_epsilon=1e-5,
+ ):
+ super().__init__()
+
+ self.n_head = n_head
+ self.d_model = d_model
+ self.d_head = d_head
+ self.dropout = dropout
+
+ self.qkv_net = nn.Linear(d_model, 3 * n_head * d_head, bias=False)
+
+ self.drop = nn.Dropout(dropout)
+ self.dropatt = nn.Dropout(dropatt)
+ self.o_net = nn.Linear(n_head * d_head, d_model, bias=False)
+
+ self.layer_norm = nn.LayerNorm(d_model, eps=layer_norm_epsilon)
+
+ self.scale = 1 / (d_head**0.5)
+
+ self.pre_lnorm = pre_lnorm
+
+ if r_r_bias is None or r_w_bias is None: # Biases are not shared
+ self.r_r_bias = nn.Parameter(torch.FloatTensor(self.n_head, self.d_head))
+ self.r_w_bias = nn.Parameter(torch.FloatTensor(self.n_head, self.d_head))
+ else:
+ self.r_r_bias = r_r_bias
+ self.r_w_bias = r_w_bias
+
+ self.r_net = nn.Linear(self.d_model, self.n_head * self.d_head, bias=False)
+
+ def _rel_shift(self, x):
+ zero_pad_shape = (x.size(0), 1) + x.size()[2:]
+ zero_pad = torch.zeros(zero_pad_shape, device=x.device, dtype=x.dtype)
+ x_padded = torch.cat([zero_pad, x], dim=1)
+
+ x_padded_shape = (x.size(1) + 1, x.size(0)) + x.size()[2:]
+ x_padded = x_padded.view(*x_padded_shape)
+
+ x = x_padded[1:].view_as(x)
+
+ return x
+
+ def forward(self, w, r, attn_mask=None, mems=None, head_mask=None, output_attentions=False):
+ qlen, rlen, bsz = w.size(0), r.size(0), w.size(1)
+
+ if mems is not None:
+ cat = torch.cat([mems, w], 0)
+ if self.pre_lnorm:
+ w_heads = self.qkv_net(self.layer_norm(cat))
+ else:
+ w_heads = self.qkv_net(cat)
+ r_head_k = self.r_net(r)
+
+ w_head_q, w_head_k, w_head_v = torch.chunk(w_heads, 3, dim=-1)
+ w_head_q = w_head_q[-qlen:]
+ else:
+ if self.pre_lnorm:
+ w_heads = self.qkv_net(self.layer_norm(w))
+ else:
+ w_heads = self.qkv_net(w)
+ r_head_k = self.r_net(r)
+
+ w_head_q, w_head_k, w_head_v = torch.chunk(w_heads, 3, dim=-1)
+
+ klen = w_head_k.size(0)
+
+ w_head_q = w_head_q.view(qlen, bsz, self.n_head, self.d_head) # qlen x bsz x n_head x d_head
+        w_head_k = w_head_k.view(klen, bsz, self.n_head, self.d_head)  # klen x bsz x n_head x d_head
+        w_head_v = w_head_v.view(klen, bsz, self.n_head, self.d_head)  # klen x bsz x n_head x d_head
+
+        r_head_k = r_head_k.view(rlen, self.n_head, self.d_head)  # rlen x n_head x d_head
+
+ # compute attention score
+ rw_head_q = w_head_q + self.r_w_bias # qlen x bsz x n_head x d_head
+ AC = torch.einsum("ibnd,jbnd->ijbn", (rw_head_q, w_head_k)) # qlen x klen x bsz x n_head
+
+ rr_head_q = w_head_q + self.r_r_bias
+ BD = torch.einsum("ibnd,jnd->ijbn", (rr_head_q, r_head_k)) # qlen x klen x bsz x n_head
+ BD = self._rel_shift(BD)
+
+ # [qlen x klen x bsz x n_head]
+ attn_score = AC + BD
+ attn_score.mul_(self.scale)
+
+ mask_value = torch.finfo(attn_score.dtype).min
+
+ # compute attention probability
+ if attn_mask is not None and torch.sum(attn_mask).item():
+ attn_mask = attn_mask == 1 # Switch to bool
+ if attn_mask.dim() == 2:
+ attn_score = (
+ attn_score.float().masked_fill(attn_mask[None, :, :, None], mask_value).type_as(attn_score)
+ )
+ elif attn_mask.dim() == 3:
+ attn_score = attn_score.float().masked_fill(attn_mask[:, :, :, None], mask_value).type_as(attn_score)
+
+ # [qlen x klen x bsz x n_head]
+ attn_prob = nn.functional.softmax(attn_score, dim=1)
+ attn_prob = self.dropatt(attn_prob)
+
+ # Mask heads if we want to
+ if head_mask is not None:
+ attn_prob = attn_prob * head_mask
+
+ # compute attention vector
+ attn_vec = torch.einsum("ijbn,jbnd->ibnd", (attn_prob, w_head_v))
+
+ # [qlen x bsz x n_head x d_head]
+ attn_vec = attn_vec.contiguous().view(attn_vec.size(0), attn_vec.size(1), self.n_head * self.d_head)
+
+ # linear projection
+ attn_out = self.o_net(attn_vec)
+ attn_out = self.drop(attn_out)
+
+ if self.pre_lnorm:
+ # residual connection
+ outputs = [w + attn_out]
+ else:
+ # residual connection + layer normalization
+ outputs = [self.layer_norm(w + attn_out)]
+
+ if output_attentions:
+ outputs.append(attn_prob)
+
+ return outputs
+
+
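+# Illustrative sketch (not part of the original library code) of the `_rel_shift` trick used in
+# RelPartialLearnableMultiHeadAttn above: prepending a zero column and reshaping realigns row i so that
+# column j lines up with the relative distance between query i and key j (future positions are masked later).
+def _sketch_rel_shift(qlen=3, klen=3):
+    # fake BD term of shape [qlen, klen, bsz=1, n_head=1]
+    x = torch.arange(float(qlen * klen)).view(qlen, klen, 1, 1)
+    zero_pad = torch.zeros((x.size(0), 1) + x.size()[2:], dtype=x.dtype)
+    x_padded = torch.cat([zero_pad, x], dim=1)
+    x_padded_shape = (x.size(1) + 1, x.size(0)) + x.size()[2:]
+    x_padded = x_padded.view(*x_padded_shape)
+    # squeezed result for the 3x3 case: [[2, 0, 3], [4, 5, 0], [6, 7, 8]]
+    return x_padded[1:].view_as(x)
+
+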
+class RelPartialLearnableDecoderLayer(nn.Module):
+ def __init__(self, n_head, d_model, d_head, d_inner, dropout, layer_norm_epsilon=1e-5, **kwargs):
+ super().__init__()
+
+ self.dec_attn = RelPartialLearnableMultiHeadAttn(
+ n_head, d_model, d_head, dropout, layer_norm_epsilon=layer_norm_epsilon, **kwargs
+ )
+ self.pos_ff = PositionwiseFF(
+ d_model, d_inner, dropout, pre_lnorm=kwargs.get("pre_lnorm"), layer_norm_epsilon=layer_norm_epsilon
+ )
+
+ def forward(self, dec_inp, r, dec_attn_mask=None, mems=None, head_mask=None, output_attentions=False):
+ attn_outputs = self.dec_attn(
+ dec_inp,
+ r,
+ attn_mask=dec_attn_mask,
+ mems=mems,
+ head_mask=head_mask,
+ output_attentions=output_attentions,
+ )
+ ff_output = self.pos_ff(attn_outputs[0])
+
+ outputs = [ff_output] + attn_outputs[1:]
+
+ return outputs
+
+
+class AdaptiveEmbedding(nn.Module):
+ def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, sample_softmax=False):
+ super().__init__()
+
+ self.n_token = n_token
+ self.d_embed = d_embed
+
+ self.cutoffs = cutoffs + [n_token]
+ self.div_val = div_val
+ self.d_proj = d_proj
+
+ self.emb_scale = d_proj**0.5
+
+ self.cutoff_ends = [0] + self.cutoffs
+
+ self.emb_layers = nn.ModuleList()
+ self.emb_projs = nn.ParameterList()
+ if div_val == 1:
+ self.emb_layers.append(nn.Embedding(n_token, d_embed, sparse=sample_softmax > 0))
+ if d_proj != d_embed:
+ self.emb_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed)))
+ else:
+ for i in range(len(self.cutoffs)):
+ l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
+ d_emb_i = d_embed // (div_val**i)
+ self.emb_layers.append(nn.Embedding(r_idx - l_idx, d_emb_i))
+ self.emb_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_emb_i)))
+
+ def forward(self, inp):
+ if self.div_val == 1:
+ embed = self.emb_layers[0](inp)
+ if self.d_proj != self.d_embed:
+ embed = nn.functional.linear(embed, self.emb_projs[0])
+ else:
+ param = next(self.parameters())
+ inp_flat = inp.view(-1)
+ emb_flat = torch.zeros([inp_flat.size(0), self.d_proj], dtype=param.dtype, device=param.device)
+ for i in range(len(self.cutoffs)):
+ l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
+
+ mask_i = (inp_flat >= l_idx) & (inp_flat < r_idx)
+ indices_i = mask_i.nonzero().squeeze()
+
+ if indices_i.numel() == 0:
+ continue
+
+ inp_i = inp_flat.index_select(0, indices_i) - l_idx
+ emb_i = self.emb_layers[i](inp_i)
+ emb_i = nn.functional.linear(emb_i, self.emb_projs[i])
+
+ emb_flat.index_copy_(0, indices_i, emb_i)
+
+ embed_shape = inp.size() + (self.d_proj,)
+ embed = emb_flat.view(embed_shape)
+
+ embed.mul_(self.emb_scale)
+
+ return embed
+
+
+class TransfoXLPreTrainedModel(PreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = TransfoXLConfig
+ load_tf_weights = load_tf_weights_in_transfo_xl
+ base_model_prefix = "transformer"
+
+ def _init_weight(self, weight):
+ if self.config.init == "uniform":
+ nn.init.uniform_(weight, -self.config.init_range, self.config.init_range)
+ elif self.config.init == "normal":
+ nn.init.normal_(weight, 0.0, self.config.init_std)
+
+ def _init_bias(self, bias):
+ nn.init.constant_(bias, 0.0)
+
+ def _init_weights(self, m):
+ """Initialize the weights."""
+ classname = m.__class__.__name__
+ if classname.find("Linear") != -1:
+ if hasattr(m, "weight") and m.weight is not None:
+ self._init_weight(m.weight)
+ if hasattr(m, "bias") and m.bias is not None:
+ self._init_bias(m.bias)
+ elif classname.find("AdaptiveEmbedding") != -1:
+ if hasattr(m, "emb_projs"):
+ for i in range(len(m.emb_projs)):
+ if m.emb_projs[i] is not None:
+ nn.init.normal_(m.emb_projs[i], 0.0, self.config.proj_init_std)
+ elif classname.find("Embedding") != -1:
+ if hasattr(m, "weight"):
+ self._init_weight(m.weight)
+ elif classname.find("ProjectedAdaptiveLogSoftmax") != -1:
+ if hasattr(m, "cluster_weight") and m.cluster_weight is not None:
+ self._init_weight(m.cluster_weight)
+ if hasattr(m, "cluster_bias") and m.cluster_bias is not None:
+ self._init_bias(m.cluster_bias)
+ if hasattr(m, "out_projs"):
+ for i in range(len(m.out_projs)):
+ if m.out_projs[i] is not None:
+ nn.init.normal_(m.out_projs[i], 0.0, self.config.proj_init_std)
+ elif classname.find("LayerNorm") != -1:
+ if hasattr(m, "weight"):
+ nn.init.normal_(m.weight, 1.0, self.config.init_std)
+ if hasattr(m, "bias") and m.bias is not None:
+ self._init_bias(m.bias)
+ else:
+ if hasattr(m, "r_emb"):
+ self._init_weight(m.r_emb)
+ if hasattr(m, "r_w_bias"):
+ self._init_weight(m.r_w_bias)
+ if hasattr(m, "r_r_bias"):
+ self._init_weight(m.r_r_bias)
+ if hasattr(m, "r_bias"):
+ self._init_bias(m.r_bias)
+
+ def resize_token_embeddings(self, new_num_tokens: Optional[int] = None, layer: Optional[int] = -1):
+ """
+ Resize input token embeddings matrix of the model if new_num_tokens != config.vocab_size. Take care of tying
+ weights embeddings afterwards if the model class has a *tie_weights()* method.
+
+ Arguments:
+ new_num_tokens: (*optional*) int:
+ New number of tokens in the embedding matrix. Increasing the size will add newly initialized vectors at
+                the end. Reducing the size will remove vectors from the end. If not provided or `None`, does nothing and
+                just returns a pointer to the input tokens `torch.nn.Embedding` Module of the model.
+            layer: (*optional*) int:
+                Layer of the *AdaptiveEmbedding* where the resizing should be done. By default the last layer will be
+                resized. Be aware that when resizing a layer other than the last one, you have to ensure that the new
+                token(s) in the tokenizer are at the corresponding position.
+
+        Return: `torch.nn.Embedding` Pointer to the input tokens Embedding Module of the model
+ """
+ base_model = getattr(self, self.base_model_prefix, self) # get the base model if needed
+
+ if new_num_tokens is None:
+ return self.get_input_embeddings()
+
+ new_num_tokens_layer, layer = self._get_new_num_tokens_layer(new_num_tokens, layer)
+ assert new_num_tokens_layer > 0, "The size of the new embedding layer cannot be 0 or less"
+ model_embeds = base_model._resize_token_embeddings(new_num_tokens_layer, layer)
+
+ # Update base model and current model config
+ self.config.vocab_size = new_num_tokens
+ base_model.vocab_size = new_num_tokens
+ base_model.n_token = new_num_tokens
+
+ new_embedding_shapes = self._get_embedding_shapes()
+ self._resize_cutoffs(new_num_tokens, new_num_tokens_layer, new_embedding_shapes, layer)
+
+ # Tie weights again if needed
+ self.tie_weights()
+
+ return model_embeds
+
+ def _get_new_num_tokens_layer(self, new_num_tokens, layer):
+ embeddings = self.get_input_embeddings()
+ if layer == -1:
+ layer = len(embeddings.emb_layers) - 1
+ assert 0 <= layer <= len(embeddings.emb_layers) - 1
+
+ new_num_tokens_layer = (
+ new_num_tokens
+ - sum([emb.weight.shape[0] for emb in embeddings.emb_layers[:layer]])
+ - sum([emb.weight.shape[0] for emb in embeddings.emb_layers[layer + 1 :]])
+ )
+ return new_num_tokens_layer, layer
+
+ def _get_embedding_shapes(self):
+ embeddings = self.get_input_embeddings()
+ return [emb.weight.shape[0] for emb in embeddings.emb_layers]
+
+ def _resize_token_embeddings(self, new_num_tokens, layer=-1):
+ embeddings = self.get_input_embeddings()
+ if new_num_tokens is None:
+ return embeddings
+ new_embeddings_layer = self._get_resized_embeddings(embeddings.emb_layers[layer], new_num_tokens)
+ embeddings.emb_layers[layer] = new_embeddings_layer
+
+ self.set_input_embeddings(embeddings)
+
+ return self.get_input_embeddings()
+
+ def _resize_cutoffs(self, new_num_tokens, new_emb_size, new_embedding_shapes, layer):
+ embeddings = self.get_input_embeddings()
+
+ for i in range(layer, len(embeddings.cutoffs)):
+ embeddings.cutoffs[i] = sum(new_embedding_shapes[: i + 1])
+
+ embeddings.cutoff_ends = [0] + embeddings.cutoffs
+ embeddings.n_token = new_num_tokens
+
+ self.config.cutoffs = embeddings.cutoffs[:-1]
+
+ return embeddings.cutoffs
+
+
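+# Illustrative sketch (not part of the original library code) of the cutoff bookkeeping performed by
+# `resize_token_embeddings`/`_resize_cutoffs` above: adding tokens to one embedding layer only shifts the
+# cumulative cutoffs from that layer onwards. The numbers below are example values.
+def _sketch_resize_cutoffs(cutoffs=(20000, 40000, 200000, 267735), added=10):
+    per_layer = [cutoffs[0]] + [b - a for a, b in zip(cutoffs, cutoffs[1:])]  # per-layer vocab sizes
+    per_layer[-1] += added  # grow the last embedding layer
+    return [sum(per_layer[: i + 1]) for i in range(len(per_layer))]  # -> [20000, 40000, 200000, 267745]
+
+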
+@dataclass
+class TransfoXLModelOutput(ModelOutput):
+ """
+ Base class for model's outputs that may also contain a past key/values (to speed up sequential decoding).
+
+ Args:
+ last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
+ Sequence of hidden-states at the output of the last layer of the model.
+ mems (`List[torch.FloatTensor]` of length `config.n_layers`):
+ Contains pre-computed hidden-states (key and values in the attention blocks). Can be used (see `mems`
+ input) to speed up sequential decoding. The token ids which have their past given to this model should not
+ be passed as input ids as they have already been computed.
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
+ shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+            Attention weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ """
+
+ last_hidden_state: torch.FloatTensor
+ mems: List[torch.FloatTensor] = None
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
+
+
+@dataclass
+class TransfoXLSequenceClassifierOutputWithPast(ModelOutput):
+ """
+ Base class for outputs of sentence classification models.
+
+ Args:
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Classification (or regression if config.num_labels==1) loss.
+ logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`):
+ Classification (or regression if config.num_labels==1) scores (before SoftMax).
+ mems (`List[torch.FloatTensor]` of length `config.n_layers`):
+ Contains pre-computed hidden-states (key and values in the attention blocks). Can be used (see `mems`
+ input) to speed up sequential decoding. The token ids which have their past given to this model should not
+ be passed as input ids as they have already been computed.
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
+ shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+            Attention weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ """
+
+ loss: Optional[torch.FloatTensor] = None
+ logits: torch.FloatTensor = None
+ mems: List[torch.FloatTensor] = None
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
+
+
+@dataclass
+class TransfoXLLMHeadModelOutput(ModelOutput):
+ """
+ Base class for model's outputs that may also contain a past key/values (to speed up sequential decoding).
+
+ Args:
+ losses (`torch.FloatTensor` of shape *(batch_size, sequence_length-1)*, *optional*, returned when `labels` is provided):
+ Language modeling losses (not reduced).
+ prediction_scores (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
+ Prediction scores of the language modeling head (scores for each vocabulary token after SoftMax).
+ mems (`List[torch.FloatTensor]` of length `config.n_layers`):
+ Contains pre-computed hidden-states (key and values in the attention blocks). Can be used (see `mems`
+ input) to speed up sequential decoding. The token ids which have their past given to this model should not
+ be passed as input ids as they have already been computed.
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
+ shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+            Attention weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+        loss (`torch.FloatTensor` of shape `()`, *optional*, returned when `labels` is provided):
+ Reduced language modeling loss.
+ """
+
+ losses: Optional[torch.FloatTensor] = None
+ prediction_scores: torch.FloatTensor = None
+ mems: List[torch.FloatTensor] = None
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
+ loss: Optional[torch.FloatTensor] = None
+
+ @property
+ def logits(self):
+ # prediction scores are the output of the adaptive softmax, see
+ # the file `modeling_transfo_xl_utilities`. Since the adaptive
+ # softmax returns the log softmax value, `self.prediction_scores`
+ # are strictly speaking not exactly `logits`, but behave the same
+ # way logits do.
+ return self.prediction_scores
+
+
+TRANSFO_XL_START_DOCSTRING = r"""
+
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
+ library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
+ etc.)
+
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
+ and behavior.
+
+ Parameters:
+ config ([`TransfoXLConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+TRANSFO_XL_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ mems (`List[torch.FloatTensor]` of length `config.n_layers`):
+ Contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model (see
+ `mems` output below). Can be used to speed up sequential decoding. The token ids which have their mems
+ given to this model should not be passed as `input_ids` as they have already been computed.
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
+ model's internal embedding lookup matrix.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+@add_start_docstrings(
+ "The bare Bert Model transformer outputting raw hidden-states without any specific head on top.",
+ TRANSFO_XL_START_DOCSTRING,
+)
+class TransfoXLModel(TransfoXLPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+
+ self.n_token = config.vocab_size
+
+ self.d_embed = config.d_embed
+ self.d_model = config.d_model
+ self.n_head = config.n_head
+ self.d_head = config.d_head
+
+ self.word_emb = AdaptiveEmbedding(
+ config.vocab_size, config.d_embed, config.d_model, config.cutoffs, div_val=config.div_val
+ )
+
+ self.drop = nn.Dropout(config.dropout)
+
+ self.n_layer = config.n_layer
+ self.mem_len = config.mem_len
+ self.attn_type = config.attn_type
+
+ if not config.untie_r:
+ self.r_w_bias = nn.Parameter(torch.FloatTensor(self.n_head, self.d_head))
+ self.r_r_bias = nn.Parameter(torch.FloatTensor(self.n_head, self.d_head))
+
+ self.layers = nn.ModuleList()
+ if config.attn_type == 0: # the default attention
+ for i in range(config.n_layer):
+ self.layers.append(
+ RelPartialLearnableDecoderLayer(
+ config.n_head,
+ config.d_model,
+ config.d_head,
+ config.d_inner,
+ config.dropout,
+ dropatt=config.dropatt,
+ pre_lnorm=config.pre_lnorm,
+ r_w_bias=None if config.untie_r else self.r_w_bias,
+ r_r_bias=None if config.untie_r else self.r_r_bias,
+ layer_norm_epsilon=config.layer_norm_epsilon,
+ )
+ )
+ else: # learnable embeddings and absolute embeddings are not used in our pretrained checkpoints
+ raise NotImplementedError # Removed them to avoid maintaining dead code
+
+ self.same_length = config.same_length
+ self.clamp_len = config.clamp_len
+
+ if self.attn_type == 0: # default attention
+ self.pos_emb = PositionalEmbedding(self.d_model)
+ else: # learnable embeddings and absolute embeddings
+ raise NotImplementedError # Removed these to avoid maintaining dead code - They are not used in our pretrained checkpoint
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.word_emb
+
+ def set_input_embeddings(self, new_embeddings):
+ self.word_emb = new_embeddings
+
+ def backward_compatible(self):
+ self.sample_softmax = -1
+
+ def reset_memory_length(self, mem_len):
+ self.mem_len = mem_len
+
+ def _prune_heads(self, heads):
+ logger.info("Head pruning is not implemented for Transformer-XL model")
+ pass
+
+ def init_mems(self, bsz):
+ if self.mem_len > 0:
+ mems = []
+ param = next(self.parameters())
+ for i in range(self.n_layer):
+ empty = torch.zeros(self.mem_len, bsz, self.config.d_model, dtype=param.dtype, device=param.device)
+ mems.append(empty)
+
+ return mems
+ else:
+ return None
+
+ def _update_mems(self, hids, mems, mlen, qlen):
+ # does not deal with None
+ if mems is None:
+ return None
+
+ # mems is not None
+ assert len(hids) == len(mems), "len(hids) != len(mems)"
+
+ # There are `mlen + qlen` steps that can be cached into mems
+ with torch.no_grad():
+ new_mems = []
+ end_idx = mlen + max(0, qlen)
+ beg_idx = max(0, end_idx - self.mem_len)
+ for i in range(len(hids)):
+ cat = torch.cat([mems[i], hids[i]], dim=0)
+ new_mems.append(cat[beg_idx:end_idx].detach())
+
+ return new_mems
+
+ @add_start_docstrings_to_model_forward(TRANSFO_XL_INPUTS_DOCSTRING)
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=TransfoXLModelOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ mems: Optional[List[torch.FloatTensor]] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, TransfoXLModelOutput]:
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ # the original code for Transformer-XL used shapes [len, bsz] but we want a unified interface in the library
+ # so we transpose here from shape [bsz, len] to shape [len, bsz]
+ if input_ids is not None and inputs_embeds is not None:
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
+ elif input_ids is not None:
+ input_ids = input_ids.transpose(0, 1).contiguous()
+ qlen, bsz = input_ids.size()
+ elif inputs_embeds is not None:
+ inputs_embeds = inputs_embeds.transpose(0, 1).contiguous()
+ qlen, bsz = inputs_embeds.shape[0], inputs_embeds.shape[1]
+ else:
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
+
+ if mems is None:
+ mems = self.init_mems(bsz)
+
+ # Prepare head mask if needed
+ # 1.0 in head_mask indicate we keep the head
+ # attention_probs has shape bsz x n_heads x N x N
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] (a head_mask for each layer)
+ # and head_mask is converted to shape [num_hidden_layers x qlen x klen x bsz x n_head]
+ if head_mask is not None:
+ if head_mask.dim() == 1:
+ head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(0).unsqueeze(0)
+ head_mask = head_mask.expand(self.n_layer, -1, -1, -1, -1)
+ elif head_mask.dim() == 2:
+ head_mask = head_mask.unsqueeze(1).unsqueeze(1).unsqueeze(1)
+ head_mask = head_mask.to(
+ dtype=next(self.parameters()).dtype
+ ) # switch to float if need + fp16 compatibility
+ else:
+ head_mask = [None] * self.n_layer
+
+ if inputs_embeds is not None:
+ word_emb = inputs_embeds
+ else:
+ word_emb = self.word_emb(input_ids)
+
+ mlen = mems[0].size(0) if mems is not None else 0
+ klen = mlen + qlen
+ if self.same_length:
+ all_ones = word_emb.new_ones((qlen, klen), dtype=torch.bool)
+ mask_len = klen - self.mem_len
+ if mask_len > 0:
+ mask_shift_len = qlen - mask_len
+ else:
+ mask_shift_len = qlen
+ dec_attn_mask = (torch.triu(all_ones, 1 + mlen) + torch.tril(all_ones, -mask_shift_len))[:, :, None] # -1
+ else:
+ dec_attn_mask = torch.triu(word_emb.new_ones((qlen, klen), dtype=torch.bool), diagonal=1 + mlen)[
+ :, :, None
+ ]
+
+ hids = []
+ attentions = [] if output_attentions else None
+ if self.attn_type == 0: # default
+            pos_seq = torch.arange(klen - 1, -1, -1.0, device=word_emb.device, dtype=torch.int64).to(
+                word_emb.dtype
+            )
+ if self.clamp_len > 0:
+ pos_seq.clamp_(max=self.clamp_len)
+ pos_emb = self.pos_emb(pos_seq)
+
+ core_out = self.drop(word_emb)
+ pos_emb = self.drop(pos_emb)
+
+ for i, layer in enumerate(self.layers):
+ hids.append(core_out)
+ mems_i = None if mems is None else mems[i]
+ layer_outputs = layer(
+ core_out,
+ pos_emb,
+ dec_attn_mask=dec_attn_mask,
+ mems=mems_i,
+ head_mask=head_mask[i],
+ output_attentions=output_attentions,
+ )
+ core_out = layer_outputs[0]
+ if output_attentions:
+ attentions.append(layer_outputs[1])
+ else: # learnable embeddings and absolute embeddings
+ raise NotImplementedError # Removed these to avoid maintaining dead code - They are not used in our pretrained checkpoint
+
+ core_out = self.drop(core_out)
+
+ new_mems = self._update_mems(hids, mems, mlen, qlen)
+
+ if output_hidden_states:
+ # Add last layer and transpose to library standard shape [bsz, len, hidden_dim]
+ hids.append(core_out)
+ hids = tuple(t.transpose(0, 1).contiguous() for t in hids)
+ else:
+ hids = None
+ if output_attentions:
+ # Transpose to library standard shape [bsz, n_heads, query_seq_len, key_seq_len]
+ attentions = tuple(t.permute(2, 3, 0, 1).contiguous() for t in attentions)
+ # We transpose back here to shape [bsz, len, hidden_dim]
+ core_out = core_out.transpose(0, 1).contiguous()
+
+ if not return_dict:
+ return tuple(v for v in [core_out, new_mems, hids, attentions] if v is not None)
+
+ return TransfoXLModelOutput(
+ last_hidden_state=core_out,
+ mems=new_mems,
+ hidden_states=hids,
+ attentions=attentions,
+ )
+
+
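+# Illustrative sketch (not part of the original library code) of the memory update rule in
+# `TransfoXLModel._update_mems` above: cached states and the current segment's hidden states are
+# concatenated along the time axis and only the newest `mem_len` steps are kept.
+def _sketch_update_mems(mem_len=4, mlen=4, qlen=3, d_model=2):
+    mems = torch.zeros(mlen, 1, d_model)  # previously cached steps (bsz = 1)
+    hids = torch.ones(qlen, 1, d_model)  # hidden states of the current segment
+    cat = torch.cat([mems, hids], dim=0)  # [mlen + qlen, bsz, d_model]
+    end_idx = mlen + qlen
+    beg_idx = max(0, end_idx - mem_len)
+    return cat[beg_idx:end_idx].detach()  # the most recent mem_len steps form the new memory
+
+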
+@add_start_docstrings(
+ """
+ The Transformer-XL Model with a language modeling head on top (adaptive softmax with weights tied to the adaptive
+ input embeddings)
+ """,
+ TRANSFO_XL_START_DOCSTRING,
+)
+class TransfoXLLMHeadModel(TransfoXLPreTrainedModel):
+ _tied_weights_keys = [r"crit\.out_projs\.\d+", r"crit\.out_layers\.\d+\.weight"]
+
+ def __init__(self, config):
+ super().__init__(config)
+ self.transformer = TransfoXLModel(config)
+ self.sample_softmax = config.sample_softmax
+ self.trainer_compatible = getattr(config, "trainer_compatible", False)
+
+ if not self.trainer_compatible:
+ warnings.warn(
+ "The output of TransfoXL will be updated in v5 to support a single loss as first argument. In order "
+ "to use that updated output, please specify `trainer_compatible=True` as your configuration"
+ " attribute.",
+ DeprecationWarning,
+ )
+
+ assert self.sample_softmax <= 0, (
+ "Sampling from the softmax is not implemented yet. Please look at issue: #3310:"
+ " https://github.com/huggingface/transformers/issues/3310"
+ )
+
+ self.crit = ProjectedAdaptiveLogSoftmax(
+ config.vocab_size, config.d_embed, config.d_model, config.cutoffs, div_val=config.div_val
+ )
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def tie_weights(self):
+ """
+ Run this to be sure output and input (adaptive) softmax weights are tied
+ """
+
+ if self.config.tie_word_embeddings:
+ for i in range(len(self.crit.out_layers)):
+ self._tie_or_clone_weights(self.crit.out_layers[i], self.transformer.word_emb.emb_layers[i])
+ if self.config.tie_projs:
+ for i, tie_proj in enumerate(self.config.tie_projs):
+ if tie_proj and self.config.div_val == 1 and self.config.d_model != self.config.d_embed:
+ if self.config.torchscript:
+ self.crit.out_projs[i] = nn.Parameter(self.transformer.word_emb.emb_projs[0].clone())
+ else:
+ self.crit.out_projs[i] = self.transformer.word_emb.emb_projs[0]
+ elif tie_proj and self.config.div_val != 1:
+ if self.config.torchscript:
+ self.crit.out_projs[i] = nn.Parameter(self.transformer.word_emb.emb_projs[i].clone())
+ else:
+ self.crit.out_projs[i] = self.transformer.word_emb.emb_projs[i]
+
+ def reset_memory_length(self, mem_len):
+ self.transformer.reset_memory_length(mem_len)
+
+ def init_mems(self, bsz):
+ return self.transformer.init_mems(bsz)
+
+ @add_start_docstrings_to_model_forward(TRANSFO_XL_INPUTS_DOCSTRING)
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=TransfoXLLMHeadModelOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ mems: Optional[List[torch.FloatTensor]] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, TransfoXLLMHeadModelOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+            Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
+            `labels = input_ids`. Indices are selected in `[-100, 0, ..., config.vocab_size]`. All labels set to `-100`
+            are ignored (masked); the loss is only computed for labels in `[0, ..., config.vocab_size]`.
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+ if input_ids is not None:
+ bsz, tgt_len = input_ids.size(0), input_ids.size(1)
+ elif inputs_embeds is not None:
+ bsz, tgt_len = inputs_embeds.size(0), inputs_embeds.size(1)
+ else:
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
+
+ transformer_outputs = self.transformer(
+ input_ids,
+ mems=mems,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ last_hidden = transformer_outputs[0]
+ pred_hid = last_hidden[:, -tgt_len:]
+
+ if labels is not None:
+            # Prevents all labels from being -100 and throwing an error
+            # when backpropagating the loss
+ miss_valid_label = labels[0, 1:].sum() == (labels.size(1) - 1) * -100
+ if miss_valid_label:
+                # Sets a token, just to prevent the loss from being NaN
+ labels[0, 1] = self.config.eos_token_id
+
+ softmax_output = self.crit(pred_hid, labels)
+ prediction_scores = softmax_output.view(bsz, tgt_len, -1) if labels is None else ()
+
+ if labels is not None:
+ losses = softmax_output.view(bsz, tgt_len - 1)
+            # Avoid incorporating padding (-100) tokens into the loss value
+ loss = losses[losses != 0].mean()
+ else:
+ losses, loss = None, None
+
+ if not return_dict:
+ if self.trainer_compatible:
+ output = (prediction_scores, losses) if losses is not None else (prediction_scores,)
+ output += transformer_outputs[1:]
+ return ((loss,) + output) if loss is not None else output
+ else:
+ output = (prediction_scores, *transformer_outputs[1:])
+ output = ((losses,) + output) if losses is not None else output
+ return (output + (loss,)) if loss is not None else output
+
+ return TransfoXLLMHeadModelOutput(
+ loss=loss,
+ prediction_scores=prediction_scores,
+ losses=losses,
+ mems=transformer_outputs.mems,
+ hidden_states=transformer_outputs.hidden_states,
+ attentions=transformer_outputs.attentions,
+ )
+
+ def get_output_embeddings(self):
+ """Double-check if you are using adaptive softmax."""
+ if self.sample_softmax > 0:
+ return self.out_layer
+ else:
+ return self.crit.out_layers[-1]
+
+ def prepare_inputs_for_generation(self, input_ids, past_key_values=None, **model_kwargs):
+ inputs = {}
+
+ # if past is defined in model kwargs then use it for faster decoding
+ if past_key_values:
+ inputs["mems"] = past_key_values
+ inputs["input_ids"] = input_ids[:, -1].unsqueeze(-1)
+ else:
+ inputs["input_ids"] = input_ids
+
+ return inputs
+
+ def _resize_cutoffs(self, new_num_tokens, new_emb_size, new_embedding_shapes, layer):
+ new_cutoffs = super()._resize_cutoffs(new_num_tokens, new_emb_size, new_embedding_shapes, layer)
+
+ self.crit.cutoffs = new_cutoffs
+ self.crit.cutoff_ends = [0] + new_cutoffs
+ self.crit.n_token = new_num_tokens
+
+ @staticmethod
+ def _reorder_cache(mems: List[torch.Tensor], beam_idx: torch.Tensor) -> List[torch.Tensor]:
+ """
+ This function is used to re-order the `mems` cache if [`~PreTrainedModel.beam_search`] or
+ [`~PreTrainedModel.beam_sample`] is called. This is required to match `mems` with the correct beam_idx at every
+ generation step.
+ """
+ return [layer_past.index_select(1, beam_idx.to(layer_past.device)) for layer_past in mems]
+
+
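+# Hedged usage sketch (not part of the original library code): feeding the `mems` returned by the LM head
+# model back in extends the context across segments. The checkpoint name comes from _CHECKPOINT_FOR_DOC
+# above; running this requires downloading the checkpoint and its tokenizer dependencies.
+def _sketch_segment_level_recurrence():
+    from transformers import AutoTokenizer
+
+    tokenizer = AutoTokenizer.from_pretrained("transfo-xl/transfo-xl-wt103")
+    model = TransfoXLLMHeadModel.from_pretrained("transfo-xl/transfo-xl-wt103")
+
+    first = tokenizer("The Transformer-XL caches hidden states", return_tensors="pt")
+    out = model(input_ids=first["input_ids"])
+    # `out.mems` holds the cached states of the first segment; the second call can attend to them
+    second = tokenizer("and reuses them across segments", return_tensors="pt")
+    return model(input_ids=second["input_ids"], mems=out.mems)
+
+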
+@add_start_docstrings(
+ """
+ The Transformer-XL Model transformer with a sequence classification head on top (linear layer).
+
+ [`TransfoXLForSequenceClassification`] uses the last token in order to do the classification, as other causal
+ models (e.g. GPT-1) do.
+
+    Since it does classification on the last token, it requires knowing the position of the last token. If a
+ `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
+ no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
+ padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
+ each row of the batch).
+ """,
+ TRANSFO_XL_START_DOCSTRING,
+)
+class TransfoXLForSequenceClassification(TransfoXLPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+ self.num_labels = config.num_labels
+ self.transformer = TransfoXLModel(config)
+ self.score = nn.Linear(config.d_embed, self.num_labels, bias=False)
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(TRANSFO_XL_INPUTS_DOCSTRING)
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=TransfoXLSequenceClassifierOutputWithPast,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ mems: Optional[List[torch.FloatTensor]] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, TransfoXLSequenceClassifierOutputWithPast]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ transformer_outputs = self.transformer(
+ input_ids,
+ mems=mems,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ hidden_states = transformer_outputs[0]
+ logits = self.score(hidden_states)
+
+ if input_ids is not None:
+ batch_size, sequence_length = input_ids.shape[:2]
+ else:
+ batch_size, sequence_length = inputs_embeds.shape[:2]
+
+ assert (
+ self.config.pad_token_id is not None or batch_size == 1
+ ), "Cannot handle batch sizes > 1 if no padding token is defined."
+ if self.config.pad_token_id is None:
+ sequence_lengths = -1
+ else:
+ if input_ids is not None:
+ # if no pad token found, use modulo instead of reverse indexing for ONNX compatibility
+ sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1
+ sequence_lengths = sequence_lengths % input_ids.shape[-1]
+ sequence_lengths = sequence_lengths.to(logits.device)
+ else:
+ sequence_lengths = -1
+ logger.warning(
+ f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be "
+ "unexpected if using padding tokens in conjunction with `inputs_embeds.`"
+ )
+
+ pooled_logits = logits[range(batch_size), sequence_lengths]
+
+ loss = None
+ if labels is not None:
+ if self.config.problem_type is None:
+ if self.num_labels == 1:
+ self.config.problem_type = "regression"
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
+ self.config.problem_type = "single_label_classification"
+ else:
+ self.config.problem_type = "multi_label_classification"
+
+ if self.config.problem_type == "regression":
+ loss_fct = MSELoss()
+ if self.num_labels == 1:
+ loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
+ else:
+ loss = loss_fct(pooled_logits, labels)
+ elif self.config.problem_type == "single_label_classification":
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
+ elif self.config.problem_type == "multi_label_classification":
+ loss_fct = BCEWithLogitsLoss()
+ loss = loss_fct(pooled_logits, labels)
+ if not return_dict:
+ output = (pooled_logits,) + transformer_outputs[1:]
+ return ((loss,) + output) if loss is not None else output
+
+ return TransfoXLSequenceClassifierOutputWithPast(
+ loss=loss,
+ logits=pooled_logits,
+ mems=transformer_outputs.mems,
+ hidden_states=transformer_outputs.hidden_states,
+ attentions=transformer_outputs.attentions,
+ )
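+
+
+# Illustrative sketch (not part of the original library code) of the ONNX-friendly indexing used by the
+# sequence-classification head above: the modulo lets rows without any padding wrap around to the last
+# position instead of using a negative index. Assumes a pad_token_id of 0.
+def _sketch_last_non_pad_index():
+    input_ids = torch.tensor([[5, 6, 7, 0, 0], [3, 4, 8, 9, 2]])  # second row has no padding
+    sequence_lengths = torch.eq(input_ids, 0).int().argmax(-1) - 1  # first pad position minus one
+    sequence_lengths = sequence_lengths % input_ids.shape[-1]  # -1 wraps around to the last position
+    return sequence_lengths  # -> tensor([2, 4])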
diff --git a/venv/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/modeling_transfo_xl_utilities.py b/venv/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/modeling_transfo_xl_utilities.py
new file mode 100644
index 0000000000000000000000000000000000000000..addf2a08372bc00a377ab7410d977c31fb1d48eb
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/modeling_transfo_xl_utilities.py
@@ -0,0 +1,252 @@
+# coding=utf-8
+# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
+# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+ Utilities for PyTorch Transformer XL model. Directly adapted from https://github.com/kimiyoung/transformer-xl.
+"""
+
+
+import torch
+from torch import nn
+
+
+# CUDA_MAJOR = int(torch.version.cuda.split('.')[0])
+# CUDA_MINOR = int(torch.version.cuda.split('.')[1])
+
+
+class ProjectedAdaptiveLogSoftmax(nn.Module):
+ def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, keep_order=False):
+ super().__init__()
+
+ self.n_token = n_token
+ self.d_embed = d_embed
+ self.d_proj = d_proj
+
+ self.cutoffs = cutoffs + [n_token]
+ self.cutoff_ends = [0] + self.cutoffs
+ self.div_val = div_val
+
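+ # The "head" softmax scores the shortlist (most frequent tokens) plus one extra logit per
+ # tail cluster; tokens in the tail clusters are scored by a second, smaller softmax.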
+ self.shortlist_size = self.cutoffs[0]
+ self.n_clusters = len(self.cutoffs) - 1
+ self.head_size = self.shortlist_size + self.n_clusters
+
+ if self.n_clusters > 0:
+ self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed))
+ self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters))
+
+ self.out_layers = nn.ModuleList()
+ self.out_projs = nn.ParameterList()
+
+ if div_val == 1:
+ for i in range(len(self.cutoffs)):
+ if d_proj != d_embed:
+ self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed)))
+ else:
+ self.out_projs.append(None)
+
+ self.out_layers.append(nn.Linear(d_embed, n_token))
+ else:
+ for i in range(len(self.cutoffs)):
+ l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
+ d_emb_i = d_embed // (div_val**i)
+
+ self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_emb_i)))
+
+ self.out_layers.append(nn.Linear(d_emb_i, r_idx - l_idx))
+
+ self.keep_order = keep_order
+
+ def _compute_logit(self, hidden, weight, bias, proj):
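+ # When a projection is given, `hidden` (of size d_proj) is first mapped down to this
+ # cluster's embedding size before the output linear layer is applied.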
+ if proj is None:
+ logit = nn.functional.linear(hidden, weight, bias=bias)
+ else:
+ # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
+ proj_hid = nn.functional.linear(hidden, proj.t().contiguous())
+ logit = nn.functional.linear(proj_hid, weight, bias=bias)
+ # else:
+ # logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
+ # if bias is not None:
+ # logit = logit + bias
+
+ return logit
+
+ def forward(self, hidden, labels=None, keep_order=False):
+ """
+ Params:
+ hidden :: [len*bsz x d_proj]
+ labels :: [len*bsz]
+
+ Return:
+ if labels is None: out :: [len*bsz x n_tokens] log probabilities of tokens over the vocabulary;
+ else: out :: [(len-1)*bsz] negative log likelihood.
+ We could replace this implementation by the native PyTorch one if the native one had an option to set the
+ bias on all clusters:
+ https://github.com/pytorch/pytorch/blob/dbe6a7a9ff1a364a8706bf5df58a1ca96d2fd9da/torch/nn/modules/adaptive.py#L138
+ """
+
+ if labels is not None:
+ # Shift so that tokens < n predict n
+ hidden = hidden[..., :-1, :].contiguous()
+ labels = labels[..., 1:].contiguous()
+ hidden = hidden.view(-1, hidden.size(-1))
+ labels = labels.view(-1)
+ if hidden.size(0) != labels.size(0):
+ raise RuntimeError("Input and labels should have the same size in the batch dimension.")
+ else:
+ hidden = hidden.view(-1, hidden.size(-1))
+
+ if self.n_clusters == 0:
+ logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
+ if labels is not None:
+ mask = labels != -100
+ out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)
+ out[mask] = (
+ -nn.functional.log_softmax(logit, dim=-1)[mask].gather(1, labels[mask].unsqueeze(1)).squeeze(1)
+ )
+ else:
+ out = nn.functional.log_softmax(logit, dim=-1)
+ else:
+ # construct weights and biases
+ weights, biases = [], []
+ for i in range(len(self.cutoffs)):
+ if self.div_val == 1:
+ l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
+ weight_i = self.out_layers[0].weight[l_idx:r_idx]
+ bias_i = self.out_layers[0].bias[l_idx:r_idx]
+ else:
+ weight_i = self.out_layers[i].weight
+ bias_i = self.out_layers[i].bias
+
+ if i == 0:
+ weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
+ bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)
+
+ weights.append(weight_i)
+ biases.append(bias_i)
+
+ head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
+
+ head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
+ head_logprob = nn.functional.log_softmax(head_logit, dim=1)
+
+ if labels is None:
+ out = hidden.new_empty((head_logit.size(0), self.n_token))
+ else:
+ out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)
+
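+ # Walk over the clusters: head-cluster tokens take their log probability directly from the
+ # head softmax; tail tokens add the head's cluster logit to the tail softmax log probability.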
+ offset = 0
+ cutoff_values = [0] + self.cutoffs
+ for i in range(len(cutoff_values) - 1):
+ l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]
+
+ if labels is not None:
+ mask_i = (labels >= l_idx) & (labels < r_idx)
+ indices_i = mask_i.nonzero().squeeze()
+
+ if indices_i.numel() == 0:
+ continue
+
+ target_i = labels.index_select(0, indices_i) - l_idx
+ head_logprob_i = head_logprob.index_select(0, indices_i)
+ hidden_i = hidden.index_select(0, indices_i)
+ else:
+ hidden_i = hidden
+
+ if i == 0:
+ if labels is not None:
+ logprob_i = head_logprob_i.gather(1, target_i[:, None]).squeeze(1)
+ else:
+ out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
+ else:
+ weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]
+
+ tail_logit_i = self._compute_logit(hidden_i, weight_i, bias_i, proj_i)
+ tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)
+ cluster_prob_idx = self.cutoffs[0] + i - 1 # No probability for the head cluster
+ if labels is not None:
+ logprob_i = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
+ 1, target_i[:, None]
+ ).squeeze(1)
+ else:
+ logprob_i = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
+ out[:, l_idx:r_idx] = logprob_i
+
+ if labels is not None:
+ if (hasattr(self, "keep_order") and self.keep_order) or keep_order:
+ out.index_copy_(0, indices_i, -logprob_i)
+ else:
+ out[offset : offset + logprob_i.size(0)].copy_(-logprob_i)
+ offset += logprob_i.size(0)
+
+ return out
+
+ def log_prob(self, hidden):
+ r"""
+ Computes log probabilities for all \\(n\_classes\\). From:
+ https://github.com/pytorch/pytorch/blob/master/torch/nn/modules/adaptive.py
+
+ Args:
+ hidden (Tensor): a minibatch of examples
+
+ Returns:
+ log-probabilities for each class \\(c\\) in range \\(0 <= c <= n\_classes\\), where \\(n\_classes\\) is
+ a parameter passed to `AdaptiveLogSoftmaxWithLoss` constructor. Shape:
+
+ - Input: \\((N, in\_features)\\)
+ - Output: \\((N, n\_classes)\\)
+ """
+ if self.n_clusters == 0:
+ logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
+ return nn.functional.log_softmax(logit, dim=-1)
+ else:
+ # construct weights and biases
+ weights, biases = [], []
+ for i in range(len(self.cutoffs)):
+ if self.div_val == 1:
+ l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
+ weight_i = self.out_layers[0].weight[l_idx:r_idx]
+ bias_i = self.out_layers[0].bias[l_idx:r_idx]
+ else:
+ weight_i = self.out_layers[i].weight
+ bias_i = self.out_layers[i].bias
+
+ if i == 0:
+ weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
+ bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)
+
+ weights.append(weight_i)
+ biases.append(bias_i)
+
+ head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
+ head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
+
+ out = hidden.new_empty((head_logit.size(0), self.n_token))
+ head_logprob = nn.functional.log_softmax(head_logit, dim=1)
+
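+ # Same cluster walk as in `forward`, but log probabilities are filled in for every token.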
+ cutoff_values = [0] + self.cutoffs
+ for i in range(len(cutoff_values) - 1):
+ start_idx, stop_idx = cutoff_values[i], cutoff_values[i + 1]
+
+ if i == 0:
+ out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
+ else:
+ weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]
+
+ tail_logit_i = self._compute_logit(hidden, weight_i, bias_i, proj_i)
+ tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)
+
+ # Index the head's cluster logit the same way as in `forward` and fill the full slice.
+ cluster_prob_idx = self.cutoffs[0] + i - 1
+ logprob_i = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
+ out[:, start_idx:stop_idx] = logprob_i
+
+ return out
diff --git a/venv/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/tokenization_transfo_xl.py b/venv/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/tokenization_transfo_xl.py
new file mode 100644
index 0000000000000000000000000000000000000000..7290a7a83b85660069bc2b88e4ba2734114f3f9b
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/deprecated/transfo_xl/tokenization_transfo_xl.py
@@ -0,0 +1,819 @@
+# coding=utf-8
+# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
+# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+ Tokenization classes for Transformer XL model. Adapted from https://github.com/kimiyoung/transformer-xl.
+"""
+
+
+import glob
+import os
+import pickle
+import re
+from collections import Counter, OrderedDict
+from typing import List, Optional, Tuple
+
+import numpy as np
+
+from ....tokenization_utils import PreTrainedTokenizer
+from ....utils import (
+ cached_file,
+ is_sacremoses_available,
+ is_torch_available,
+ logging,
+ requires_backends,
+ strtobool,
+ torch_only_method,
+)
+
+
+if is_sacremoses_available():
+ import sacremoses as sm
+
+
+if is_torch_available():
+ import torch
+
+
+logger = logging.get_logger(__name__)
+
+VOCAB_FILES_NAMES = {
+ "pretrained_vocab_file": "vocab.pkl",
+ "pretrained_vocab_file_torch": "vocab.bin",
+ "vocab_file": "vocab.txt",
+}
+
+
+PRETRAINED_CORPUS_ARCHIVE_MAP = {
+ "transfo-xl/transfo-xl-wt103": "https://huggingface.co/transfo-xl/transfo-xl-wt103/resolve/main/corpus.bin",
+}
+CORPUS_NAME = "corpus.bin"
+
+MATCH_NUMBERS = r"(?<=\d)[,.](?=\d)", r" @\g<0>@ "
+DETOKENIZE_NUMBERS = [(r" @\,@ ", r","), (r" @\.@ ", r".")]
+
+
+def tokenize_numbers(text_array: List[str]) -> List[str]:
+ """
+ Splits large comma-separated numbers and floating point values. This is done by replacing commas with ' @,@ ' and
+ dots with ' @.@ '.
+
+ Args:
+ text_array: An already tokenized text as list.
+
+ Returns:
+ A list of strings with tokenized numbers.
+
+ Example:
+
+ ```python
+ >>> tokenize_numbers(["$", "5,000", "1.73", "m"])
+ ['$', '5', '@,@', '000', '1', '@.@', '73', 'm']
+ ```"""
+ tokenized = []
+ for i in range(len(text_array)):
+ reg, sub = MATCH_NUMBERS
+ replaced = re.sub(reg, sub, text_array[i]).split()
+ tokenized.extend(replaced)
+
+ return tokenized
+
+
+def detokenize_numbers(text: str) -> str:
+ """
+ Inverts the operation of *tokenize_numbers*. This is replacing ' @,@ ' and ' @.@' by ',' and '.'.
+
+ Args:
+ text: A string where the number should be detokenized.
+
+ Returns:
+ A detokenized string.
+
+ Example:
+
+ ```python
+ >>> detokenize_numbers("$ 5 @,@ 000 1 @.@ 73 m")
+ '$ 5,000 1.73 m'
+ ```"""
+ for reg, sub in DETOKENIZE_NUMBERS:
+ text = re.sub(reg, sub, text)
+ return text
+
+
+class TransfoXLTokenizer(PreTrainedTokenizer):
+ """
+ Construct a Transformer-XL tokenizer adapted from Vocab class in [the original
+ code](https://github.com/kimiyoung/transformer-xl). The Transformer-XL tokenizer is a word-level tokenizer (no
+ sub-word tokenization).
+
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
+ this superclass for more information regarding those methods.
+
+ Args:
+ special (`List[str]`, *optional*):
+ A list of special tokens (to be treated by the original implementation of this tokenizer).
+ min_freq (`int`, *optional*, defaults to 0):
+ The minimum number of times a token has to be present in order to be kept in the vocabulary (otherwise it
+ will be mapped to `unk_token`).
+ max_size (`int`, *optional*):
+ The maximum size of the vocabulary. If left unset, it will default to the size of the vocabulary found
+ after excluding the tokens according to the `min_freq` rule.
+ lower_case (`bool`, *optional*, defaults to `False`):
+ Whether or not to lowercase the input when tokenizing.
+ delimiter (`str`, *optional*):
+ The delimiter used between tokens.
+ vocab_file (`str`, *optional*):
+ File containing the vocabulary (from the original implementation).
+ pretrained_vocab_file (`str`, *optional*):
+ File containing the vocabulary as saved with the `save_pretrained()` method.
+ never_split (`List[str]`, *optional*):
+ List of tokens that should never be split. If no list is specified, will simply use the existing special
+ tokens.
+ unk_token (`str`, *optional*, defaults to `"<unk>"`):
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
+ token instead.
+ eos_token (`str`, *optional*, defaults to `"<eos>"`):
+ The end of sequence token.
+ additional_special_tokens (`List[str]`, *optional*, defaults to `['<formula>']`):
+ A list of additional special tokens (for the HuggingFace functionality).
+ language (`str`, *optional*, defaults to `"en"`):
+ The language of this tokenizer (used for Moses preprocessing).
+ """
+
+ vocab_files_names = VOCAB_FILES_NAMES
+ model_input_names = ["input_ids"]
+
+ def __init__(
+ self,
+ special=None,
+ min_freq=0,
+ max_size=None,
+ lower_case=False,
+ delimiter=None,
+ vocab_file=None,
+ pretrained_vocab_file: str = None,
+ never_split=None,
+ unk_token="",
+ eos_token="",
+ additional_special_tokens=[""],
+ language="en",
+ **kwargs,
+ ):
+ logger.error(
+ "`TransfoXL` was deprecated due to security issues linked to `pickle.load` in `TransfoXLTokenizer`. "
+ "See more details on this model's documentation page: "
+ "`https://github.com/huggingface/transformers/blob/main/docs/source/en/model_doc/transfo-xl.md`."
+ )
+
+ requires_backends(self, "sacremoses")
+ if special is None:
+ special = []
+ self.counter = Counter()
+ self.special = special
+ self.min_freq = min_freq
+ self.max_size = max_size
+ self.lower_case = lower_case
+ self.delimiter = delimiter
+ self.vocab_file = vocab_file
+ self.punctuation_symbols = '!"#$%&()*+,-./\\:;<=>?@[\\]^_`{|}~'
+ self.punction_without_space_before_pattern = re.compile(rf"[^\s][{self.punctuation_symbols}]")
+ self.punctuation_with_space_around_pattern = self._compile_space_around_punctuation_pattern()
+ self.language = language
+ self.moses_punct_normalizer = sm.MosesPunctNormalizer(language)
+ self.moses_tokenizer = sm.MosesTokenizer(language)
+ self.moses_detokenizer = sm.MosesDetokenizer(language)
+ self.idx2sym = []
+ self.sym2idx = OrderedDict()
+ # This try... catch... is not beautiful but honestly this tokenizer was not made to be used
+ # in a library like ours, at all.
+ try:
+ vocab_dict = None
+ if pretrained_vocab_file is not None:
+ # Priority on pickle files (support PyTorch and TF)
+ if not strtobool(os.environ.get("TRUST_REMOTE_CODE", "False")):
+ raise ValueError(
+ "This part uses `pickle.load` which is insecure and will execute arbitrary code that is "
+ "potentially malicious. It's recommended to never unpickle data that could have come from an "
+ "untrusted source, or that could have been tampered with. If you already verified the pickle "
+ "data and decided to use it, you can set the environment variable "
+ "`TRUST_REMOTE_CODE` to `True` to allow it."
+ )
+ with open(pretrained_vocab_file, "rb") as f:
+ vocab_dict = pickle.load(f)
+
+ # Loading a torch-saved transfo-xl vocab dict with pickle results in an integer
+ # Entering this if statement means that we tried to load a torch-saved file with pickle, and we failed.
+ # We therefore load it with torch, if it's available.
+ if isinstance(vocab_dict, int):
+ if not is_torch_available():
+ raise ImportError(
+ "Not trying to load dict with PyTorch as you need to install pytorch to load "
+ "from a PyTorch pretrained vocabulary, "
+ "or activate it with environment variables USE_TORCH=1 and USE_TF=0."
+ )
+ vocab_dict = torch.load(pretrained_vocab_file)
+
+ if vocab_dict is not None:
+ for key, value in vocab_dict.items():
+ if key not in self.__dict__ or key in ["sym2idx", "idx2sym"]:
+ self.__dict__[key] = value
+ elif vocab_file is not None:
+ self.build_vocab()
+
+ except Exception as e:
+ raise ValueError(
+ f"Unable to parse file {pretrained_vocab_file}. Unknown format. "
+ "If you tried to load a model saved through TransfoXLTokenizerFast, "
+ "please note they are not compatible."
+ ) from e
+
+ if vocab_file is not None:
+ self.build_vocab()
+
+ super().__init__(
+ special=special,
+ min_freq=min_freq,
+ max_size=max_size,
+ lower_case=lower_case,
+ delimiter=delimiter,
+ vocab_file=vocab_file,
+ pretrained_vocab_file=pretrained_vocab_file,
+ never_split=never_split,
+ unk_token=unk_token,
+ eos_token=eos_token,
+ additional_special_tokens=additional_special_tokens,
+ language=language,
+ **kwargs,
+ )
+
+ # these are not required to initialize the parent class as they are only used when tokenizing.
+ if never_split is None:
+ never_split = self.all_special_tokens
+ self.never_split = never_split
+
+ @property
+ def do_lower_case(self):
+ return self.lower_case
+
+ def _compile_space_around_punctuation_pattern(self):
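+ # Zero-width pattern matching positions that sit directly before a punctuation symbol,
+ # so that spaces can be inserted around punctuation during preprocessing.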
+ look_ahead_for_special_token = f"(?=[{self.punctuation_symbols}])"
+ look_ahead_to_match_all_except_space = r"(?=[^\s])"
+ return re.compile(r"" + look_ahead_for_special_token + look_ahead_to_match_all_except_space)
+
+ def count_file(self, path, verbose=False, add_eos=False):
+ if verbose:
+ logger.info(f"counting file {path} ...")
+ assert os.path.exists(path), f"Input file {path} not found"
+
+ sents = []
+ with open(path, "r", encoding="utf-8") as f:
+ for idx, line in enumerate(f):
+ if verbose and idx > 0 and idx % 500000 == 0:
+ logger.info(f" line {idx}")
+ symbols = self.tokenize(line, add_eos=add_eos)
+ self.counter.update(symbols)
+ sents.append(symbols)
+
+ return sents
+
+ def count_sents(self, sents, verbose=False):
+ """
+ sents : a list of sentences, each a list of tokenized symbols
+ """
+ if verbose:
+ logger.info(f"counting {len(sents)} sents ...")
+ for idx, symbols in enumerate(sents):
+ if verbose and idx > 0 and idx % 500000 == 0:
+ logger.info(f" line {idx}")
+ self.counter.update(symbols)
+
+ def _build_from_file(self, vocab_file):
+ self.idx2sym = []
+ self.sym2idx = OrderedDict()
+
+ with open(vocab_file, "r", encoding="utf-8") as f:
+ for line in f:
+ symb = line.strip().split()[0]
+ self.add_symbol(symb)
+ if "" in self.sym2idx:
+ self.unk_idx = self.sym2idx[""]
+ elif "" in self.sym2idx:
+ self.unk_idx = self.sym2idx[""]
+ else:
+ raise ValueError("Token not in vocabulary and no token in vocabulary for replacement.")
+
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
+ if os.path.isdir(save_directory):
+ vocab_file = os.path.join(
+ save_directory,
+ (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["pretrained_vocab_file"],
+ )
+ else:
+ vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
+ with open(vocab_file, "wb") as f:
+ pickle.dump(self.__dict__, f)
+ return (vocab_file,)
+
+ def build_vocab(self):
+ if self.vocab_file:
+ logger.info(f"building vocab from {self.vocab_file}")
+ self._build_from_file(self.vocab_file)
+ logger.info(f"Final vocab size {len(self.sym2idx)}")
+ else:
+ logger.info(f"building vocab with min_freq={self.min_freq}, max_size={self.max_size}")
+ self.idx2sym = []
+ self.sym2idx = OrderedDict()
+
+ for sym in self.special:
+ self.add_special(sym)
+
+ for sym, cnt in self.counter.most_common(self.max_size):
+ if cnt < self.min_freq:
+ break
+ self.add_symbol(sym)
+
+ logger.info(f"Final vocab size {len(self.sym2idx)} from {len(self.counter)} unique tokens")
+
+ @torch_only_method
+ def encode_file(self, path, ordered=False, verbose=False, add_eos=True, add_double_eos=False):
+ if verbose:
+ logger.info(f"encoding file {path} ...")
+ assert os.path.exists(path), f"Output file {path} not found"
+ encoded = []
+ with open(path, "r", encoding="utf-8") as f:
+ for idx, line in enumerate(f):
+ if verbose and idx > 0 and idx % 500000 == 0:
+ logger.info(f" line {idx}")
+ symbols = self.tokenize(line, add_eos=add_eos, add_double_eos=add_double_eos)
+ encoded.append(self.convert_to_tensor(symbols))
+
+ if ordered:
+ encoded = torch.cat(encoded)
+
+ return encoded
+
+ @torch_only_method
+ def encode_sents(self, sents, ordered=False, verbose=False):
+ if verbose:
+ logger.info(f"encoding {len(sents)} sents ...")
+ encoded = []
+ for idx, symbols in enumerate(sents):
+ if verbose and idx > 0 and idx % 500000 == 0:
+ logger.info(f" line {idx}")
+ encoded.append(self.convert_to_tensor(symbols))
+
+ if ordered:
+ encoded = torch.cat(encoded)
+
+ return encoded
+
+ def add_special(self, sym):
+ if sym not in self.sym2idx:
+ self.idx2sym.append(sym)
+ self.sym2idx[sym] = len(self.idx2sym) - 1
+ setattr(self, f"{sym.strip('<>')}_idx", self.sym2idx[sym])
+
+ def add_symbol(self, sym):
+ if sym not in self.sym2idx:
+ self.idx2sym.append(sym)
+ self.sym2idx[sym] = len(self.idx2sym) - 1
+
+ def move_added_token(self, token: str, target_idx: int):
+ """
+ Moves an added token to a specific position in the vocab. This method should be used when resizing an embedding
+ layer other than the last one in the `AdaptiveEmbedding` in order to move the token in the tokenizer from the
+ default position (at the very end) to the desired one.
+
+ Args:
+ token: The token to move to a specific position in the vocab.
+ target_idx: The position where the token should be moved to.
+ """
+ assert token in self.added_tokens_encoder, "Token which should be moved has to be an added token"
+ assert token not in self.idx2sym, "Token which should be moved is already in vocab"
+
+ # Insert sym into vocab
+ self.idx2sym.insert(target_idx, token)
+ self.sym2idx[token] = target_idx
+
+ # Shift following indices in sym2idx
+ for idx in range(target_idx + 1, len(self.idx2sym)):
+ current_sym = self.idx2sym[idx]
+ self.sym2idx[current_sym] = idx
+
+ # Delete token from added_tokens
+ old_index = self._added_tokens_encoder.pop(token)
+ self._added_tokens_decoder.pop(old_index)
+
+ def moses_punct_norm(self, text):
+ return self.moses_punct_normalizer.normalize(text)
+
+ def moses_tokenize(self, text):
+ return self.moses_tokenizer.tokenize(
+ text, aggressive_dash_splits=True, return_str=False, escape=False, protected_patterns=self.never_split
+ )
+
+ def moses_pipeline(self, text: str) -> List[str]:
+ """
+ Does basic tokenization using [`sacremoses.MosesPunctNormalizer`] and [`sacremoses.MosesTokenizer`] with
+ *aggressive_dash_splits=True* (see [`sacremoses.tokenize.MosesTokenizer.tokenize`]). Additionally, large
+ comma-separated numbers and floating point values are split. E.g. "23,000 people are 1.80m tall" -> "23 @,@ 000
+ people are 1 @.@ 80m tall"
+
+ Args:
+ text: Text to be tokenized
+
+ Returns:
+ A list of tokenized strings
+
+ Example:
+
+ ```python
+ >>> tokenizer = TransfoXLTokenizer.from_pretrained("transfo-xl/transfo-xl-wt103")
+ >>> tokenizer.moses_pipeline("23,000 people are 1.80 m tall")
+ ['23', '@,@', '000', 'people', 'are', '1', '@.@', '80', 'm', 'tall']
+ ```"""
+ text = self.moses_punct_norm(text)
+ text = self.moses_tokenize(text)
+ text = tokenize_numbers(text)
+ return text
+
+ def _convert_id_to_token(self, idx):
+ """Converts an id in a token (BPE) using the vocab."""
+ assert 0 <= idx < len(self), f"Index {idx} out of vocabulary range"
+ return self.idx2sym[idx]
+
+ def _convert_token_to_id(self, sym):
+ """Converts a token (str) in an id using the vocab."""
+ if sym in self.sym2idx:
+ return self.sym2idx[sym]
+ else:
+ # logger.info(f'encounter unk {sym}')
+ # assert '<eos>' not in sym
+ if hasattr(self, "unk_idx"):
+ return self.sym2idx.get(sym, self.unk_idx)
+ # Backward compatibility with pre-trained models
+ elif "" in self.sym2idx:
+ return self.sym2idx[""]
+ elif "" in self.sym2idx:
+ return self.sym2idx[""]
+ else:
+ raise ValueError("Token not in vocabulary and no token in vocabulary for replacement.")
+
+ def convert_tokens_to_string(self, tokens):
+ """
+ Converts a sequence of tokens (string) into a single string. Additionally, the split numbers are converted
+ back into their original form.
+ """
+ out_string = self.moses_detokenizer.detokenize(tokens)
+ return detokenize_numbers(out_string).strip()
+
+ @torch_only_method
+ def convert_to_tensor(self, symbols):
+ return torch.LongTensor(self.convert_tokens_to_ids(symbols))
+
+ @property
+ def vocab_size(self):
+ return len(self.idx2sym)
+
+ def get_vocab(self):
+ vocab = self.sym2idx.copy()
+ vocab.update(self.added_tokens_encoder)
+ return vocab
+
+ def _tokenize(self, line, add_eos=False, add_double_eos=False):
+ line = line.strip()
+ # convert to lower case
+ if self.lower_case:
+ line = line.lower()
+
+ # empty delimiter '' will evaluate False
+ if self.delimiter == "":
+ symbols = line
+ else:
+ symbols = self.moses_pipeline(line)
+
+ if add_double_eos: # lm1b
+ return [""] + symbols + [""]
+ elif add_eos:
+ return symbols + [""]
+ else:
+ return symbols
+
+
+class LMOrderedIterator(object):
+ def __init__(self, data, bsz, bptt, device="cpu", ext_len=None):
+ """
+ data -- LongTensor -- the LongTensor is strictly ordered
+ """
+ self.bsz = bsz
+ self.bptt = bptt
+ self.ext_len = ext_len if ext_len is not None else 0
+
+ self.device = device
+
+ # Work out how cleanly we can divide the dataset into bsz parts.
+ self.n_step = data.size(0) // bsz
+
+ # Trim off any extra elements that wouldn't cleanly fit (remainders).
+ data = data.narrow(0, 0, self.n_step * bsz)
+
+ # Evenly divide the data across the bsz batches.
+ self.data = data.view(bsz, -1).t().contiguous().to(device)
+
+ # Number of mini-batches
+ self.n_batch = (self.n_step + self.bptt - 1) // self.bptt
+
+ def get_batch(self, i, bptt=None):
+ if bptt is None:
+ bptt = self.bptt
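+ # The chunk starts up to `ext_len` tokens before position `i` to provide extra context;
+ # targets are always the tokens one step ahead of the main `seq_len` window.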
+ seq_len = min(bptt, self.data.size(0) - 1 - i)
+
+ end_idx = i + seq_len
+ beg_idx = max(0, i - self.ext_len)
+
+ data = self.data[beg_idx:end_idx]
+ target = self.data[i + 1 : i + 1 + seq_len]
+
+ data_out = data.transpose(0, 1).contiguous().to(self.device)
+ target_out = target.transpose(0, 1).contiguous().to(self.device)
+
+ return data_out, target_out, seq_len
+
+ def get_fixlen_iter(self, start=0):
+ for i in range(start, self.data.size(0) - 1, self.bptt):
+ yield self.get_batch(i)
+
+ def get_varlen_iter(self, start=0, std=5, min_len=5, max_deviation=3):
+ max_len = self.bptt + max_deviation * std
+ i = start
+ while True:
+ bptt = self.bptt if np.random.random() < 0.95 else self.bptt / 2.0
+ bptt = min(max_len, max(min_len, int(np.random.normal(bptt, std))))
+ data, target, seq_len = self.get_batch(i, bptt)
+ i += seq_len
+ yield data, target, seq_len
+ if i >= self.data.size(0) - 2:
+ break
+
+ def __iter__(self):
+ return self.get_fixlen_iter()
+
+
+class LMShuffledIterator(object):
+ def __init__(self, data, bsz, bptt, device="cpu", ext_len=None, shuffle=False):
+ """
+ data -- list[LongTensor] -- there is no order among the LongTensors
+ """
+ self.data = data
+
+ self.bsz = bsz
+ self.bptt = bptt
+ self.ext_len = ext_len if ext_len is not None else 0
+
+ self.device = device
+ self.shuffle = shuffle
+
+ def get_sent_stream(self):
+ # index iterator
+ epoch_indices = np.random.permutation(len(self.data)) if self.shuffle else np.array(range(len(self.data)))
+
+ # sentence iterator
+ for idx in epoch_indices:
+ yield self.data[idx]
+
+ @torch_only_method
+ def stream_iterator(self, sent_stream):
+ # streams for each data in the batch
+ streams = [None] * self.bsz
+
+ data = torch.LongTensor(self.bptt, self.bsz)
+ target = torch.LongTensor(self.bptt, self.bsz)
+
+ n_retain = 0
+
+ while True:
+ # data : [n_retain+bptt x bsz]
+ # target : [bptt x bsz]
+ data[n_retain:].fill_(-1)
+ target.fill_(-1)
+
+ valid_batch = True
+
+ for i in range(self.bsz):
+ n_filled = 0
+ try:
+ while n_filled < self.bptt:
+ if streams[i] is None or len(streams[i]) <= 1:
+ streams[i] = next(sent_stream)
+ # number of new tokens to fill in
+ n_new = min(len(streams[i]) - 1, self.bptt - n_filled)
+ # first n_retain tokens are retained from last batch
+ data[n_retain + n_filled : n_retain + n_filled + n_new, i] = streams[i][:n_new]
+ target[n_filled : n_filled + n_new, i] = streams[i][1 : n_new + 1]
+ streams[i] = streams[i][n_new:]
+ n_filled += n_new
+ except StopIteration:
+ valid_batch = False
+ break
+
+ if not valid_batch:
+ return
+
+ data_out = data.transpose(0, 1).contiguous().to(self.device)
+ target_out = target.transpose(0, 1).contiguous().to(self.device)
+
+ yield data_out, target_out, self.bptt
+
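+ # Retain the last `ext_len` tokens of this batch as extra context for the next one.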
+ n_retain = min(data.size(0), self.ext_len)
+ if n_retain > 0:
+ data[:n_retain] = data[-n_retain:]
+ data.resize_(n_retain + self.bptt, data.size(1))
+
+ def __iter__(self):
+ # sent_stream is an iterator
+ sent_stream = self.get_sent_stream()
+
+ for batch in self.stream_iterator(sent_stream):
+ yield batch
+
+
+class LMMultiFileIterator(LMShuffledIterator):
+ def __init__(self, paths, vocab, bsz, bptt, device="cpu", ext_len=None, shuffle=False):
+ self.paths = paths
+ self.vocab = vocab
+
+ self.bsz = bsz
+ self.bptt = bptt
+ self.ext_len = ext_len if ext_len is not None else 0
+
+ self.device = device
+ self.shuffle = shuffle
+
+ def get_sent_stream(self, path):
+ sents = self.vocab.encode_file(path, add_double_eos=True)
+ if self.shuffle:
+ np.random.shuffle(sents)
+ sent_stream = iter(sents)
+
+ return sent_stream
+
+ def __iter__(self):
+ if self.shuffle:
+ np.random.shuffle(self.paths)
+
+ for path in self.paths:
+ # sent_stream is an iterator
+ sent_stream = self.get_sent_stream(path)
+ for batch in self.stream_iterator(sent_stream):
+ yield batch
+
+
+class TransfoXLCorpus(object):
+ @classmethod
+ @torch_only_method
+ def from_pretrained(cls, pretrained_model_name_or_path, cache_dir=None, *inputs, **kwargs):
+ """
+ Instantiate a pre-processed corpus.
+ """
+ vocab = TransfoXLTokenizer.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
+ is_local = os.path.isdir(pretrained_model_name_or_path)
+ # redirect to the cache, if necessary
+ try:
+ resolved_corpus_file = cached_file(pretrained_model_name_or_path, CORPUS_NAME, cache_dir=cache_dir)
+ except EnvironmentError:
+ logger.error(
+ f"Corpus '{pretrained_model_name_or_path}' was not found in corpus list"
+ f" ({', '.join(PRETRAINED_CORPUS_ARCHIVE_MAP.keys())}. We assumed '{pretrained_model_name_or_path}'"
+ f" was a path or url but couldn't find files {CORPUS_NAME} at this path or url."
+ )
+ return None
+ if is_local:
+ logger.info(f"loading corpus file {resolved_corpus_file}")
+ else:
+ logger.info(f"loading corpus file {CORPUS_NAME} from cache at {resolved_corpus_file}")
+
+ # Instantiate tokenizer.
+ corpus = cls(*inputs, **kwargs)
+ corpus_dict = torch.load(resolved_corpus_file)
+ for key, value in corpus_dict.items():
+ corpus.__dict__[key] = value
+ corpus.vocab = vocab
+ if corpus.train is not None:
+ corpus.train = torch.tensor(corpus.train, dtype=torch.long)
+ if corpus.valid is not None:
+ corpus.valid = torch.tensor(corpus.valid, dtype=torch.long)
+ if corpus.test is not None:
+ corpus.test = torch.tensor(corpus.test, dtype=torch.long)
+ return corpus
+
+ def __init__(self, *args, **kwargs):
+ self.vocab = TransfoXLTokenizer(*args, **kwargs)
+ self.dataset = None
+ self.train = None
+ self.valid = None
+ self.test = None
+
+ def build_corpus(self, path, dataset):
+ self.dataset = dataset
+
+ if self.dataset in ["ptb", "wt2", "enwik8", "text8"]:
+ self.vocab.count_file(os.path.join(path, "train.txt"))
+ self.vocab.count_file(os.path.join(path, "valid.txt"))
+ self.vocab.count_file(os.path.join(path, "test.txt"))
+ elif self.dataset == "wt103":
+ self.vocab.count_file(os.path.join(path, "train.txt"))
+ elif self.dataset == "lm1b":
+ train_path_pattern = os.path.join(
+ path,
+ "1-billion-word-language-modeling-benchmark-r13output",
+ "training-monolingual.tokenized.shuffled",
+ "news.en-*",
+ )
+ train_paths = glob.glob(train_path_pattern)
+ # the vocab will load from file when build_vocab() is called
+
+ self.vocab.build_vocab()
+
+ if self.dataset in ["ptb", "wt2", "wt103"]:
+ self.train = self.vocab.encode_file(os.path.join(path, "train.txt"), ordered=True)
+ self.valid = self.vocab.encode_file(os.path.join(path, "valid.txt"), ordered=True)
+ self.test = self.vocab.encode_file(os.path.join(path, "test.txt"), ordered=True)
+ elif self.dataset in ["enwik8", "text8"]:
+ self.train = self.vocab.encode_file(os.path.join(path, "train.txt"), ordered=True, add_eos=False)
+ self.valid = self.vocab.encode_file(os.path.join(path, "valid.txt"), ordered=True, add_eos=False)
+ self.test = self.vocab.encode_file(os.path.join(path, "test.txt"), ordered=True, add_eos=False)
+ elif self.dataset == "lm1b":
+ self.train = train_paths
+ self.valid = self.vocab.encode_file(os.path.join(path, "valid.txt"), ordered=False, add_double_eos=True)
+ self.test = self.vocab.encode_file(os.path.join(path, "test.txt"), ordered=False, add_double_eos=True)
+
+ def get_iterator(self, split, *args, **kwargs):
+ if split == "train":
+ if self.dataset in ["ptb", "wt2", "wt103", "enwik8", "text8"]:
+ data_iter = LMOrderedIterator(self.train, *args, **kwargs)
+ elif self.dataset == "lm1b":
+ kwargs["shuffle"] = True
+ data_iter = LMMultiFileIterator(self.train, self.vocab, *args, **kwargs)
+ elif split in ["valid", "test"]:
+ data = self.valid if split == "valid" else self.test
+ if self.dataset in ["ptb", "wt2", "wt103", "enwik8", "text8"]:
+ data_iter = LMOrderedIterator(data, *args, **kwargs)
+ elif self.dataset == "lm1b":
+ data_iter = LMShuffledIterator(data, *args, **kwargs)
+ else:
+ data_iter = None
+ raise ValueError(f"Split not recognized: {split}")
+
+ return data_iter
+
+
+@torch_only_method
+def get_lm_corpus(datadir, dataset):
+ fn = os.path.join(datadir, "cache.pt")
+ fn_pickle = os.path.join(datadir, "cache.pkl")
+ if os.path.exists(fn):
+ logger.info("Loading cached dataset...")
+ corpus = torch.load(fn)
+ elif os.path.exists(fn_pickle):
+ logger.info("Loading cached dataset from pickle...")
+ if not strtobool(os.environ.get("TRUST_REMOTE_CODE", "False")):
+ raise ValueError(
+ "This part uses `pickle.load` which is insecure and will execute arbitrary code that is potentially "
+ "malicious. It's recommended to never unpickle data that could have come from an untrusted source, or "
+ "that could have been tampered with. If you already verified the pickle data and decided to use it, "
+ "you can set the environment variable `TRUST_REMOTE_CODE` to `True` to allow it."
+ )
+ with open(fn, "rb") as fp:
+ corpus = pickle.load(fp)
+ else:
+ logger.info(f"Producing dataset {dataset}...")
+ kwargs = {}
+ if dataset in ["wt103", "wt2"]:
+ kwargs["special"] = [""]
+ kwargs["lower_case"] = False
+ elif dataset == "ptb":
+ kwargs["special"] = [""]
+ kwargs["lower_case"] = True
+ elif dataset == "lm1b":
+ kwargs["special"] = []
+ kwargs["lower_case"] = False
+ kwargs["vocab_file"] = os.path.join(datadir, "1b_word_vocab.txt")
+ elif dataset in ["enwik8", "text8"]:
+ pass
+
+ corpus = TransfoXLCorpus(datadir, dataset, **kwargs)
+ torch.save(corpus, fn)
+
+ return corpus
diff --git a/venv/lib/python3.10/site-packages/transformers/models/nezha/__init__.py b/venv/lib/python3.10/site-packages/transformers/models/nezha/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..f9078fc4a5667a9dae6cf9f8c02177a9583b5e74
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/nezha/__init__.py
@@ -0,0 +1,69 @@
+# Copyright 2022 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
+
+
+_import_structure = {
+ "configuration_nezha": ["NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP", "NezhaConfig"],
+}
+
+try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_nezha"] = [
+ "NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "NezhaForNextSentencePrediction",
+ "NezhaForMaskedLM",
+ "NezhaForPreTraining",
+ "NezhaForMultipleChoice",
+ "NezhaForQuestionAnswering",
+ "NezhaForSequenceClassification",
+ "NezhaForTokenClassification",
+ "NezhaModel",
+ "NezhaPreTrainedModel",
+ ]
+
+
+if TYPE_CHECKING:
+ from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_nezha import (
+ NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
+ NezhaForMaskedLM,
+ NezhaForMultipleChoice,
+ NezhaForNextSentencePrediction,
+ NezhaForPreTraining,
+ NezhaForQuestionAnswering,
+ NezhaForSequenceClassification,
+ NezhaForTokenClassification,
+ NezhaModel,
+ NezhaPreTrainedModel,
+ )
+
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
diff --git a/venv/lib/python3.10/site-packages/transformers/models/nezha/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/nezha/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..29aa4000d6babac9994802d33b1fec7ab4ba15ac
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/nezha/__pycache__/__init__.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/nezha/__pycache__/configuration_nezha.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/nezha/__pycache__/configuration_nezha.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..74f2846c7c3198d19b8352d48042d8d62adb7353
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/nezha/__pycache__/configuration_nezha.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/nezha/__pycache__/modeling_nezha.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/nezha/__pycache__/modeling_nezha.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6e39cb5c9c0551e2950ca6874772bbd83b6e7424
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/nezha/__pycache__/modeling_nezha.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/nezha/configuration_nezha.py b/venv/lib/python3.10/site-packages/transformers/models/nezha/configuration_nezha.py
new file mode 100644
index 0000000000000000000000000000000000000000..a19c27d62a4a924b4950a1ddb8f7b8500b6cfe39
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/nezha/configuration_nezha.py
@@ -0,0 +1,103 @@
+from ... import PretrainedConfig
+from ..deprecated._archive_maps import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
+
+
+class NezhaConfig(PretrainedConfig):
+ r"""
+ This is the configuration class to store the configuration of a [`NezhaModel`]. It is used to instantiate a Nezha
+ model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
+ defaults will yield a similar configuration to that of the Nezha
+ [sijunhe/nezha-cn-base](https://huggingface.co/sijunhe/nezha-cn-base) architecture.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+
+ Args:
+ vocab_size (`int`, optional, defaults to 21128):
+ Vocabulary size of the NEZHA model. Defines the number of different tokens that can be represented by the
+ *inputs_ids* passed to the forward method of [`NezhaModel`].
+ hidden_size (`int`, optional, defaults to 768):
+ Dimensionality of the encoder layers and the pooler layer.
+ num_hidden_layers (`int`, optional, defaults to 12):
+ Number of hidden layers in the Transformer encoder.
+ num_attention_heads (`int`, optional, defaults to 12):
+ Number of attention heads for each attention layer in the Transformer encoder.
+ intermediate_size (`int`, optional, defaults to 3072):
+ The dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
+ hidden_act (`str` or `function`, optional, defaults to "gelu"):
+ The non-linear activation function (function or string) in the encoder and pooler.
+ hidden_dropout_prob (`float`, optional, defaults to 0.1):
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+ attention_probs_dropout_prob (`float`, optional, defaults to 0.1):
+ The dropout ratio for the attention probabilities.
+ max_position_embeddings (`int`, optional, defaults to 512):
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
+ (e.g., 512 or 1024 or 2048).
+ type_vocab_size (`int`, optional, defaults to 2):
+ The vocabulary size of the *token_type_ids* passed into [`NezhaModel`].
+ initializer_range (`float`, optional, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+ layer_norm_eps (`float`, optional, defaults to 1e-12):
+ The epsilon used by the layer normalization layers.
+ classifier_dropout (`float`, optional, defaults to 0.1):
+ The dropout ratio for attached classifiers.
+ is_decoder (`bool`, *optional*, defaults to `False`):
+ Whether the model is used as a decoder or not. If `False`, the model is used as an encoder.
+
+ Example:
+
+ ```python
+ >>> from transformers import NezhaConfig, NezhaModel
+
+ >>> # Initializing a Nezha configuration
+ >>> configuration = NezhaConfig()
+
+ >>> # Initializing a model (with random weights) from the Nezha-base style configuration
+ >>> model = NezhaModel(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```"""
+
+ model_type = "nezha"
+
+ def __init__(
+ self,
+ vocab_size=21128,
+ hidden_size=768,
+ num_hidden_layers=12,
+ num_attention_heads=12,
+ intermediate_size=3072,
+ hidden_act="gelu",
+ hidden_dropout_prob=0.1,
+ attention_probs_dropout_prob=0.1,
+ max_position_embeddings=512,
+ max_relative_position=64,
+ type_vocab_size=2,
+ initializer_range=0.02,
+ layer_norm_eps=1e-12,
+ classifier_dropout=0.1,
+ pad_token_id=0,
+ bos_token_id=2,
+ eos_token_id=3,
+ use_cache=True,
+ **kwargs,
+ ):
+ super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
+
+ self.vocab_size = vocab_size
+ self.hidden_size = hidden_size
+ self.num_hidden_layers = num_hidden_layers
+ self.num_attention_heads = num_attention_heads
+ self.hidden_act = hidden_act
+ self.intermediate_size = intermediate_size
+ self.hidden_dropout_prob = hidden_dropout_prob
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
+ self.max_position_embeddings = max_position_embeddings
+ self.max_relative_position = max_relative_position
+ self.type_vocab_size = type_vocab_size
+ self.initializer_range = initializer_range
+ self.layer_norm_eps = layer_norm_eps
+ self.classifier_dropout = classifier_dropout
+ self.use_cache = use_cache
diff --git a/venv/lib/python3.10/site-packages/transformers/models/nezha/modeling_nezha.py b/venv/lib/python3.10/site-packages/transformers/models/nezha/modeling_nezha.py
new file mode 100644
index 0000000000000000000000000000000000000000..6d983bd2378903f5234daeb1b791057ff37b5b15
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/nezha/modeling_nezha.py
@@ -0,0 +1,1693 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""PyTorch Nezha model."""
+
+
+import math
+import os
+import warnings
+from dataclasses import dataclass
+from typing import List, Optional, Tuple, Union
+
+import torch
+import torch.utils.checkpoint
+from torch import nn
+from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
+
+from ...activations import ACT2FN
+from ...modeling_outputs import (
+ BaseModelOutputWithPastAndCrossAttentions,
+ BaseModelOutputWithPoolingAndCrossAttentions,
+ MaskedLMOutput,
+ MultipleChoiceModelOutput,
+ NextSentencePredictorOutput,
+ QuestionAnsweringModelOutput,
+ SequenceClassifierOutput,
+ TokenClassifierOutput,
+)
+from ...modeling_utils import PreTrainedModel
+from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
+from ...utils import (
+ ModelOutput,
+ add_code_sample_docstrings,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ logging,
+ replace_return_docstrings,
+)
+from .configuration_nezha import NezhaConfig
+
+
+logger = logging.get_logger(__name__)
+
+_CHECKPOINT_FOR_DOC = "sijunhe/nezha-cn-base"
+_CONFIG_FOR_DOC = "NezhaConfig"
+
+
+from ..deprecated._archive_maps import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
+
+
+def load_tf_weights_in_nezha(model, config, tf_checkpoint_path):
+ """Load tf checkpoints in a pytorch model."""
+ try:
+ import re
+
+ import numpy as np
+ import tensorflow as tf
+ except ImportError:
+ logger.error(
+ "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
+ "https://www.tensorflow.org/install/ for installation instructions."
+ )
+ raise
+ tf_path = os.path.abspath(tf_checkpoint_path)
+ logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
+ # Load weights from TF model
+ init_vars = tf.train.list_variables(tf_path)
+ names = []
+ arrays = []
+ for name, shape in init_vars:
+ logger.info(f"Loading TF weight {name} with shape {shape}")
+ array = tf.train.load_variable(tf_path, name)
+ names.append(name)
+ arrays.append(array)
+
+ for name, array in zip(names, arrays):
+ name = name.split("/")
+ # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v
+ # which are not required for using pretrained model
+ if any(
+ n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"]
+ for n in name
+ ):
+ logger.info(f"Skipping {'/'.join(name)}")
+ continue
+ pointer = model
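+ # Walk the TF variable's slash-separated scope path and resolve the matching attribute
+ # chain on the PyTorch model.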
+ for m_name in name:
+ if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
+ scope_names = re.split(r"_(\d+)", m_name)
+ else:
+ scope_names = [m_name]
+ if scope_names[0] == "kernel" or scope_names[0] == "gamma":
+ pointer = getattr(pointer, "weight")
+ elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
+ pointer = getattr(pointer, "bias")
+ elif scope_names[0] == "output_weights":
+ pointer = getattr(pointer, "weight")
+ elif scope_names[0] == "squad":
+ pointer = getattr(pointer, "classifier")
+ else:
+ try:
+ pointer = getattr(pointer, scope_names[0])
+ except AttributeError:
+ logger.info(f"Skipping {'/'.join(name)}")
+ continue
+ if len(scope_names) >= 2:
+ num = int(scope_names[1])
+ pointer = pointer[num]
+ if m_name[-11:] == "_embeddings":
+ pointer = getattr(pointer, "weight")
+ elif m_name == "kernel":
+ array = np.transpose(array)
+ try:
+ if pointer.shape != array.shape:
+ raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")
+ except ValueError as e:
+ e.args += (pointer.shape, array.shape)
+ raise
+ logger.info(f"Initialize PyTorch weight {name}")
+ pointer.data = torch.from_numpy(array)
+ return model
+
+
+class NezhaRelativePositionsEncoding(nn.Module):
+ """Implement the Functional Relative Position Encoding"""
+
+ def __init__(self, length, depth, max_relative_position=127):
+ super().__init__()
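+ # Precompute a sinusoidal embedding for every clipped relative distance in
+ # [-max_relative_position, max_relative_position]; forward() only slices this table.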
+ vocab_size = max_relative_position * 2 + 1
+ range_vec = torch.arange(length)
+ range_mat = range_vec.repeat(length).view(length, length)
+ distance_mat = range_mat - torch.t(range_mat)
+ distance_mat_clipped = torch.clamp(distance_mat, -max_relative_position, max_relative_position)
+ final_mat = distance_mat_clipped + max_relative_position
+
+ embeddings_table = torch.zeros(vocab_size, depth)
+ position = torch.arange(0, vocab_size, dtype=torch.int64).float().unsqueeze(1)
+ div_term = torch.exp(torch.arange(0, depth, 2).float() * (-math.log(10000.0) / depth))
+ embeddings_table[:, 0::2] = torch.sin(position * div_term)
+ embeddings_table[:, 1::2] = torch.cos(position * div_term)
+
+ flat_relative_positions_matrix = final_mat.view(-1)
+ one_hot_relative_positions_matrix = torch.nn.functional.one_hot(
+ flat_relative_positions_matrix, num_classes=vocab_size
+ ).float()
+ positions_encoding = torch.matmul(one_hot_relative_positions_matrix, embeddings_table)
+ my_shape = list(final_mat.size())
+ my_shape.append(depth)
+ positions_encoding = positions_encoding.view(my_shape)
+ self.register_buffer("positions_encoding", positions_encoding, persistent=False)
+
+ def forward(self, length):
+ return self.positions_encoding[:length, :length, :]
+
+
+class NezhaEmbeddings(nn.Module):
+ """Construct the embeddings from word and token_type embeddings."""
+
+ def __init__(self, config):
+ super().__init__()
+ self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
+ self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
+
+ # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
+ # any TensorFlow checkpoint file
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+ self.register_buffer(
+ "token_type_ids", torch.zeros((1, config.max_position_embeddings), dtype=torch.long), persistent=False
+ )
+
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ token_type_ids: Optional[torch.LongTensor] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ ) -> torch.Tensor:
+ if input_ids is not None:
+ input_shape = input_ids.size()
+ else:
+ input_shape = inputs_embeds.size()[:-1]
+
+ seq_length = input_shape[1]
+
+ if inputs_embeds is None:
+ inputs_embeds = self.word_embeddings(input_ids)
+
+ # When token_type_ids is not passed, fall back to the all-zeros buffer registered in the constructor
+ # (usually the case when it is auto-generated). This lets users trace the model without passing
+ # token_type_ids and solves issue #5664.
+ if token_type_ids is None:
+ if hasattr(self, "token_type_ids"):
+ buffered_token_type_ids = self.token_type_ids[:, :seq_length]
+ buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length)
+ token_type_ids = buffered_token_type_ids_expanded
+ else:
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=inputs_embeds.device)
+
+ token_type_embeddings = self.token_type_embeddings(token_type_ids)
+
+ embeddings = inputs_embeds + token_type_embeddings
+ embeddings = self.LayerNorm(embeddings)
+ embeddings = self.dropout(embeddings)
+ return embeddings
+
+
+class NezhaSelfAttention(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ if config.hidden_size % config.num_attention_heads != 0:
+ raise ValueError(
+ f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
+ f"heads ({config.num_attention_heads})"
+ )
+
+ self.num_attention_heads = config.num_attention_heads
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
+
+ self.query = nn.Linear(config.hidden_size, self.all_head_size)
+ self.key = nn.Linear(config.hidden_size, self.all_head_size)
+ self.value = nn.Linear(config.hidden_size, self.all_head_size)
+
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
+ self.relative_positions_encoding = NezhaRelativePositionsEncoding(
+ length=config.max_position_embeddings,
+ depth=self.attention_head_size,
+ max_relative_position=config.max_relative_position,
+ )
+ self.is_decoder = config.is_decoder
+
+ def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor:
+ new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
+ x = x.view(new_x_shape)
+ return x.permute(0, 2, 1, 3)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ output_attentions: Optional[bool] = False,
+ ) -> Tuple[torch.Tensor]:
+ mixed_query_layer = self.query(hidden_states)
+
+ # If this is instantiated as a cross-attention module, the keys
+ # and values come from an encoder; the attention mask needs to be
+ # such that the encoder's padding tokens are not attended to.
+ is_cross_attention = encoder_hidden_states is not None
+
+ if is_cross_attention and past_key_value is not None:
+ # reuse k,v, cross_attentions
+ key_layer = past_key_value[0]
+ value_layer = past_key_value[1]
+ attention_mask = encoder_attention_mask
+ elif is_cross_attention:
+ key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
+ value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
+ attention_mask = encoder_attention_mask
+ elif past_key_value is not None:
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
+ key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
+ value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
+ else:
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
+
+ query_layer = self.transpose_for_scores(mixed_query_layer)
+
+ if self.is_decoder:
+ # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
+ # Further calls to cross_attention layer can then reuse all cross-attention
+ # key/value_states (first "if" case)
+ # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
+ past_key_value = (key_layer, value_layer)
+
+ # Take the dot product between "query" and "key" to get the raw attention scores.
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
+
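+ # Relative-position term for the keys: score each query against the sinusoidal embedding of
+ # its relative distance to every key position and add it to the content-based scores.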
+ batch_size, num_attention_heads, from_seq_length, to_seq_length = attention_scores.size()
+ relations_keys = self.relative_positions_encoding(to_seq_length)
+ query_layer_t = query_layer.permute(2, 0, 1, 3)
+
+ query_layer_r = query_layer_t.contiguous().view(
+ from_seq_length, batch_size * num_attention_heads, self.attention_head_size
+ )
+ key_position_scores = torch.matmul(query_layer_r, relations_keys.permute(0, 2, 1))
+ key_position_scores_r = key_position_scores.view(
+ from_seq_length, batch_size, num_attention_heads, from_seq_length
+ )
+ key_position_scores_r_t = key_position_scores_r.permute(1, 2, 0, 3)
+ attention_scores = attention_scores + key_position_scores_r_t
+
+ attention_scores = attention_scores / math.sqrt(self.attention_head_size)
+
+ if attention_mask is not None:
+            # Apply the attention mask (precomputed for all layers in the NezhaModel forward() function)
+ attention_scores = attention_scores + attention_mask
+
+ # Normalize the attention scores to probabilities.
+ attention_probs = nn.functional.softmax(attention_scores, dim=-1)
+
+ # This is actually dropping out entire tokens to attend to, which might
+ # seem a bit unusual, but is taken from the original Transformer paper.
+ attention_probs = self.dropout(attention_probs)
+
+ # Mask heads if we want to
+ if head_mask is not None:
+ attention_probs = attention_probs * head_mask
+
+ context_layer = torch.matmul(attention_probs, value_layer)
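+        # Mirror of the key-side step above: the attention probabilities are reshaped so they can be multiplied
+        # with the relative-position value embeddings, and the result is added to the content-based context layer.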
+ relations_values = self.relative_positions_encoding(to_seq_length)
+ attention_probs_t = attention_probs.permute(2, 0, 1, 3)
+ attentions_probs_r = attention_probs_t.contiguous().view(
+ from_seq_length, batch_size * num_attention_heads, to_seq_length
+ )
+ value_position_scores = torch.matmul(attentions_probs_r, relations_values)
+ value_position_scores_r = value_position_scores.view(
+ from_seq_length, batch_size, num_attention_heads, self.attention_head_size
+ )
+ value_position_scores_r_t = value_position_scores_r.permute(1, 2, 0, 3)
+ context_layer = context_layer + value_position_scores_r_t
+
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
+ new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
+ context_layer = context_layer.view(new_context_layer_shape)
+
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
+
+ if self.is_decoder:
+ outputs = outputs + (past_key_value,)
+ return outputs
+
+
+# Copied from transformers.models.bert.modeling_bert.BertSelfOutput with Bert->Nezha
+class NezhaSelfOutput(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
+ return hidden_states
+
+
+class NezhaAttention(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.self = NezhaSelfAttention(config)
+ self.output = NezhaSelfOutput(config)
+ self.pruned_heads = set()
+
+ def prune_heads(self, heads):
+ if len(heads) == 0:
+ return
+ heads, index = find_pruneable_heads_and_indices(
+ heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
+ )
+
+ # Prune linear layers
+ self.self.query = prune_linear_layer(self.self.query, index)
+ self.self.key = prune_linear_layer(self.self.key, index)
+ self.self.value = prune_linear_layer(self.self.value, index)
+ self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
+
+ # Update hyper params and store pruned heads
+ self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
+ self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
+ self.pruned_heads = self.pruned_heads.union(heads)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ output_attentions: Optional[bool] = False,
+ ) -> Tuple[torch.Tensor]:
+ self_outputs = self.self(
+ hidden_states,
+ attention_mask,
+ head_mask,
+ encoder_hidden_states,
+ encoder_attention_mask,
+ past_key_value,
+ output_attentions,
+ )
+ attention_output = self.output(self_outputs[0], hidden_states)
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
+ return outputs
+
+
+# Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->Nezha
+class NezhaIntermediate(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
+ if isinstance(config.hidden_act, str):
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
+ else:
+ self.intermediate_act_fn = config.hidden_act
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.intermediate_act_fn(hidden_states)
+ return hidden_states
+
+
+# Copied from transformers.models.bert.modeling_bert.BertOutput with Bert->Nezha
+class NezhaOutput(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
+ return hidden_states
+
+
+class NezhaLayer(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.chunk_size_feed_forward = config.chunk_size_feed_forward
+ self.seq_len_dim = 1
+ self.attention = NezhaAttention(config)
+ self.is_decoder = config.is_decoder
+ self.add_cross_attention = config.add_cross_attention
+ if self.add_cross_attention:
+ if not self.is_decoder:
+ raise ValueError(f"{self} should be used as a decoder model if cross attention is added")
+ self.crossattention = NezhaAttention(config)
+ self.intermediate = NezhaIntermediate(config)
+ self.output = NezhaOutput(config)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ output_attentions: Optional[bool] = False,
+ ) -> Tuple[torch.Tensor]:
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
+ self_attention_outputs = self.attention(
+ hidden_states,
+ attention_mask,
+ head_mask,
+ output_attentions=output_attentions,
+ past_key_value=self_attn_past_key_value,
+ )
+ attention_output = self_attention_outputs[0]
+
+ # if decoder, the last output is tuple of self-attn cache
+ if self.is_decoder:
+ outputs = self_attention_outputs[1:-1]
+ present_key_value = self_attention_outputs[-1]
+ else:
+ outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
+
+ cross_attn_present_key_value = None
+ if self.is_decoder and encoder_hidden_states is not None:
+ if not hasattr(self, "crossattention"):
+ raise ValueError(
+ f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers"
+ " by setting `config.add_cross_attention=True`"
+ )
+
+ # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple
+ cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
+ cross_attention_outputs = self.crossattention(
+ attention_output,
+ attention_mask,
+ head_mask,
+ encoder_hidden_states,
+ encoder_attention_mask,
+ cross_attn_past_key_value,
+ output_attentions,
+ )
+ attention_output = cross_attention_outputs[0]
+ outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights
+
+ # add cross-attn cache to positions 3,4 of present_key_value tuple
+ cross_attn_present_key_value = cross_attention_outputs[-1]
+ present_key_value = present_key_value + cross_attn_present_key_value
+
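+        # `apply_chunking_to_forward` runs the feed-forward block over chunks of `chunk_size_feed_forward` tokens
+        # along the sequence dimension to reduce peak memory; a chunk size of 0 disables chunking.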
+ layer_output = apply_chunking_to_forward(
+ self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
+ )
+ outputs = (layer_output,) + outputs
+
+ # if decoder, return the attn key/values as the last output
+ if self.is_decoder:
+ outputs = outputs + (present_key_value,)
+
+ return outputs
+
+ def feed_forward_chunk(self, attention_output):
+ intermediate_output = self.intermediate(attention_output)
+ layer_output = self.output(intermediate_output, attention_output)
+ return layer_output
+
+
+# Copied from transformers.models.bert.modeling_bert.BertEncoder with Bert->Nezha
+class NezhaEncoder(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.config = config
+ self.layer = nn.ModuleList([NezhaLayer(config) for _ in range(config.num_hidden_layers)])
+ self.gradient_checkpointing = False
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = False,
+ output_hidden_states: Optional[bool] = False,
+ return_dict: Optional[bool] = True,
+ ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]:
+ all_hidden_states = () if output_hidden_states else None
+ all_self_attentions = () if output_attentions else None
+ all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
+
+ if self.gradient_checkpointing and self.training:
+ if use_cache:
+ logger.warning_once(
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
+ )
+ use_cache = False
+
+ next_decoder_cache = () if use_cache else None
+ for i, layer_module in enumerate(self.layer):
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ layer_head_mask = head_mask[i] if head_mask is not None else None
+ past_key_value = past_key_values[i] if past_key_values is not None else None
+
+ if self.gradient_checkpointing and self.training:
+ layer_outputs = self._gradient_checkpointing_func(
+ layer_module.__call__,
+ hidden_states,
+ attention_mask,
+ layer_head_mask,
+ encoder_hidden_states,
+ encoder_attention_mask,
+ past_key_value,
+ output_attentions,
+ )
+ else:
+ layer_outputs = layer_module(
+ hidden_states,
+ attention_mask,
+ layer_head_mask,
+ encoder_hidden_states,
+ encoder_attention_mask,
+ past_key_value,
+ output_attentions,
+ )
+
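+            # layer_outputs is (hidden_states, self_attn_weights?, cross_attn_weights?, present_key_value?),
+            # where the optional entries depend on `output_attentions` and on whether this is a decoder layer.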
+ hidden_states = layer_outputs[0]
+ if use_cache:
+ next_decoder_cache += (layer_outputs[-1],)
+ if output_attentions:
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
+ if self.config.add_cross_attention:
+ all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
+
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ if not return_dict:
+ return tuple(
+ v
+ for v in [
+ hidden_states,
+ next_decoder_cache,
+ all_hidden_states,
+ all_self_attentions,
+ all_cross_attentions,
+ ]
+ if v is not None
+ )
+ return BaseModelOutputWithPastAndCrossAttentions(
+ last_hidden_state=hidden_states,
+ past_key_values=next_decoder_cache,
+ hidden_states=all_hidden_states,
+ attentions=all_self_attentions,
+ cross_attentions=all_cross_attentions,
+ )
+
+
+# Copied from transformers.models.bert.modeling_bert.BertPooler with Bert->Nezha
+class NezhaPooler(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
+ self.activation = nn.Tanh()
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ # We "pool" the model by simply taking the hidden state corresponding
+ # to the first token.
+ first_token_tensor = hidden_states[:, 0]
+ pooled_output = self.dense(first_token_tensor)
+ pooled_output = self.activation(pooled_output)
+ return pooled_output
+
+
+# Copied from transformers.models.bert.modeling_bert.BertPredictionHeadTransform with Bert->Nezha
+class NezhaPredictionHeadTransform(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
+ if isinstance(config.hidden_act, str):
+ self.transform_act_fn = ACT2FN[config.hidden_act]
+ else:
+ self.transform_act_fn = config.hidden_act
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.transform_act_fn(hidden_states)
+ hidden_states = self.LayerNorm(hidden_states)
+ return hidden_states
+
+
+# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->Nezha
+class NezhaLMPredictionHead(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.transform = NezhaPredictionHeadTransform(config)
+
+ # The output weights are the same as the input embeddings, but there is
+ # an output-only bias for each token.
+ self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
+
+ self.bias = nn.Parameter(torch.zeros(config.vocab_size))
+
+ # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
+ self.decoder.bias = self.bias
+
+ def forward(self, hidden_states):
+ hidden_states = self.transform(hidden_states)
+ hidden_states = self.decoder(hidden_states)
+ return hidden_states
+
+
+# Copied from transformers.models.bert.modeling_bert.BertOnlyMLMHead with Bert->Nezha
+class NezhaOnlyMLMHead(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.predictions = NezhaLMPredictionHead(config)
+
+ def forward(self, sequence_output: torch.Tensor) -> torch.Tensor:
+ prediction_scores = self.predictions(sequence_output)
+ return prediction_scores
+
+
+# Copied from transformers.models.bert.modeling_bert.BertOnlyNSPHead with Bert->Nezha
+class NezhaOnlyNSPHead(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.seq_relationship = nn.Linear(config.hidden_size, 2)
+
+ def forward(self, pooled_output):
+ seq_relationship_score = self.seq_relationship(pooled_output)
+ return seq_relationship_score
+
+
+# Copied from transformers.models.bert.modeling_bert.BertPreTrainingHeads with Bert->Nezha
+class NezhaPreTrainingHeads(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.predictions = NezhaLMPredictionHead(config)
+ self.seq_relationship = nn.Linear(config.hidden_size, 2)
+
+ def forward(self, sequence_output, pooled_output):
+ prediction_scores = self.predictions(sequence_output)
+ seq_relationship_score = self.seq_relationship(pooled_output)
+ return prediction_scores, seq_relationship_score
+
+
+class NezhaPreTrainedModel(PreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = NezhaConfig
+ load_tf_weights = load_tf_weights_in_nezha
+ base_model_prefix = "nezha"
+ supports_gradient_checkpointing = True
+
+ def _init_weights(self, module):
+ """Initialize the weights"""
+ if isinstance(module, nn.Linear):
+ # Slightly different from the TF version which uses truncated_normal for initialization
+ # cf https://github.com/pytorch/pytorch/pull/5617
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+ if module.bias is not None:
+ module.bias.data.zero_()
+ elif isinstance(module, nn.Embedding):
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+ if module.padding_idx is not None:
+ module.weight.data[module.padding_idx].zero_()
+ elif isinstance(module, nn.LayerNorm):
+ module.bias.data.zero_()
+ module.weight.data.fill_(1.0)
+
+
+@dataclass
+class NezhaForPreTrainingOutput(ModelOutput):
+ """
+ Output type of [`NezhaForPreTraining`].
+
+ Args:
+        loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Total loss as the sum of the masked language modeling loss and the next sequence prediction
+ (classification) loss.
+ prediction_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
+ Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
+ seq_relationship_logits (`torch.FloatTensor` of shape `(batch_size, 2)`):
+ Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation
+ before SoftMax).
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
+ shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+            Attention weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ """
+
+ loss: Optional[torch.FloatTensor] = None
+ prediction_logits: torch.FloatTensor = None
+ seq_relationship_logits: torch.FloatTensor = None
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
+
+
+NEZHA_START_DOCSTRING = r"""
+
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
+    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
+ etc.)
+
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
+    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
+ and behavior.
+
+ Parameters:
+ config ([`NezhaConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+NEZHA_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `({0})`):
+ Indices of input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
+ 1]`:
+
+ - 0 corresponds to a *sentence A* token,
+ - 1 corresponds to a *sentence B* token.
+
+ [What are token type IDs?](../glossary#token-type-ids)
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
+ model's internal embedding lookup matrix.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+@add_start_docstrings(
+ "The bare Nezha Model transformer outputting raw hidden-states without any specific head on top.",
+ NEZHA_START_DOCSTRING,
+)
+class NezhaModel(NezhaPreTrainedModel):
+ """
+
+ The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
+ cross-attention is added between the self-attention layers, following the architecture described in [Attention is
+ all you need](https://arxiv.org/abs/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
+ Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
+
+    To behave as a decoder the model needs to be initialized with the `is_decoder` argument of the configuration set
+    to `True`. To be used in a Seq2Seq model, the model needs to be initialized with both the `is_decoder` argument
+    and `add_cross_attention` set to `True`; an `encoder_hidden_states` is then expected as an input to the forward
+    pass.
+ """
+
+ def __init__(self, config, add_pooling_layer=True):
+ super().__init__(config)
+ self.config = config
+
+ self.embeddings = NezhaEmbeddings(config)
+ self.encoder = NezhaEncoder(config)
+
+ self.pooler = NezhaPooler(config) if add_pooling_layer else None
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.embeddings.word_embeddings
+
+ def set_input_embeddings(self, value):
+ self.embeddings.word_embeddings = value
+
+ def _prune_heads(self, heads_to_prune):
+ """
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
+ class PreTrainedModel
+ """
+ for layer, heads in heads_to_prune.items():
+ self.encoder.layer[layer].attention.prune_heads(heads)
+
+ @add_start_docstrings_to_model_forward(NEZHA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=BaseModelOutputWithPoolingAndCrossAttentions,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ token_type_ids: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ encoder_hidden_states: Optional[torch.Tensor] = None,
+ encoder_attention_mask: Optional[torch.Tensor] = None,
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]:
+ r"""
+ encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
+ the model is configured as a decoder.
+ encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
+ the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+ past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
+
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+ use_cache (`bool`, *optional*):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
+ `past_key_values`).
+ """
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if self.config.is_decoder:
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+ else:
+ use_cache = False
+
+ if input_ids is not None and inputs_embeds is not None:
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
+ elif input_ids is not None:
+ self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
+ input_shape = input_ids.size()
+ elif inputs_embeds is not None:
+ input_shape = inputs_embeds.size()[:-1]
+ else:
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
+
+ batch_size, seq_length = input_shape
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
+
+ # past_key_values_length
+ past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
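+        # (each cached key/value tensor has shape (batch_size, num_heads, past_seq_length, head_size), so
+        # dimension 2 is the number of tokens already processed)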
+
+ if attention_mask is None:
+            attention_mask = torch.ones((batch_size, seq_length + past_key_values_length), device=device)
+
+ if token_type_ids is None:
+ if hasattr(self.embeddings, "token_type_ids"):
+ buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]
+ buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length)
+ token_type_ids = buffered_token_type_ids_expanded
+ else:
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
+
+ # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
+ # ourselves in which case we just need to make it broadcastable to all heads.
+ extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape)
+
+        # If a 2D or 3D attention mask is provided for the cross-attention,
+        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
+ if self.config.is_decoder and encoder_hidden_states is not None:
+ encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
+ encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
+ if encoder_attention_mask is None:
+ encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
+ encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
+ else:
+ encoder_extended_attention_mask = None
+
+ # Prepare head mask if needed
+ # 1.0 in head_mask indicate we keep the head
+ # attention_probs has shape bsz x n_heads x N x N
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
+
+ embedding_output = self.embeddings(
+ input_ids=input_ids,
+ token_type_ids=token_type_ids,
+ inputs_embeds=inputs_embeds,
+ )
+ encoder_outputs = self.encoder(
+ embedding_output,
+ attention_mask=extended_attention_mask,
+ head_mask=head_mask,
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=encoder_extended_attention_mask,
+ past_key_values=past_key_values,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ sequence_output = encoder_outputs[0]
+ pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
+
+ if not return_dict:
+ return (sequence_output, pooled_output) + encoder_outputs[1:]
+
+ return BaseModelOutputWithPoolingAndCrossAttentions(
+ last_hidden_state=sequence_output,
+ pooler_output=pooled_output,
+ past_key_values=encoder_outputs.past_key_values,
+ hidden_states=encoder_outputs.hidden_states,
+ attentions=encoder_outputs.attentions,
+ cross_attentions=encoder_outputs.cross_attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+ Nezha Model with two heads on top as done during the pretraining: a `masked language modeling` head and a `next
+ sentence prediction (classification)` head.
+ """,
+ NEZHA_START_DOCSTRING,
+)
+class NezhaForPreTraining(NezhaPreTrainedModel):
+ _tied_weights_keys = ["cls.predictions.decoder"]
+
+ def __init__(self, config):
+ super().__init__(config)
+
+ self.nezha = NezhaModel(config)
+ self.cls = NezhaPreTrainingHeads(config)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_output_embeddings(self):
+ return self.cls.predictions.decoder
+
+ def set_output_embeddings(self, new_embeddings):
+ self.cls.predictions.decoder = new_embeddings
+
+ @add_start_docstrings_to_model_forward(NEZHA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @replace_return_docstrings(output_type=NezhaForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ token_type_ids: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ labels: Optional[torch.Tensor] = None,
+ next_sentence_label: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple[torch.Tensor], NezhaForPreTrainingOutput]:
+ r"""
+        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+            Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
+            config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked);
+            the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
+ next_sentence_label (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the next sequence prediction (classification) loss. Input should be a sequence
+            pair (see `input_ids` docstring). Indices should be in `[0, 1]`:
+
+ - 0 indicates sequence B is a continuation of sequence A,
+ - 1 indicates sequence B is a random sequence.
+ kwargs (`Dict[str, any]`, optional, defaults to *{}*):
+ Used to hide legacy arguments that have been deprecated.
+
+ Returns:
+
+ Example:
+
+ ```python
+ >>> from transformers import AutoTokenizer, NezhaForPreTraining
+ >>> import torch
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("sijunhe/nezha-cn-base")
+ >>> model = NezhaForPreTraining.from_pretrained("sijunhe/nezha-cn-base")
+
+ >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
+ >>> outputs = model(**inputs)
+
+ >>> prediction_logits = outputs.prediction_logits
+ >>> seq_relationship_logits = outputs.seq_relationship_logits
+ ```
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.nezha(
+ input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ sequence_output, pooled_output = outputs[:2]
+ prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output)
+
+ total_loss = None
+ if labels is not None and next_sentence_label is not None:
+ loss_fct = CrossEntropyLoss()
+ masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
+ next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
+ total_loss = masked_lm_loss + next_sentence_loss
+
+ if not return_dict:
+ output = (prediction_scores, seq_relationship_score) + outputs[2:]
+ return ((total_loss,) + output) if total_loss is not None else output
+
+ return NezhaForPreTrainingOutput(
+ loss=total_loss,
+ prediction_logits=prediction_scores,
+ seq_relationship_logits=seq_relationship_score,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+
+@add_start_docstrings("""Nezha Model with a `language modeling` head on top.""", NEZHA_START_DOCSTRING)
+class NezhaForMaskedLM(NezhaPreTrainedModel):
+ _tied_weights_keys = ["cls.predictions.decoder"]
+
+ def __init__(self, config):
+ super().__init__(config)
+
+ if config.is_decoder:
+ logger.warning(
+ "If you want to use `NezhaForMaskedLM` make sure `config.is_decoder=False` for "
+ "bi-directional self-attention."
+ )
+
+ self.nezha = NezhaModel(config, add_pooling_layer=False)
+ self.cls = NezhaOnlyMLMHead(config)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_output_embeddings(self):
+ return self.cls.predictions.decoder
+
+ def set_output_embeddings(self, new_embeddings):
+ self.cls.predictions.decoder = new_embeddings
+
+ @add_start_docstrings_to_model_forward(NEZHA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=MaskedLMOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ token_type_ids: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ encoder_hidden_states: Optional[torch.Tensor] = None,
+ encoder_attention_mask: Optional[torch.Tensor] = None,
+ labels: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple[torch.Tensor], MaskedLMOutput]:
+ r"""
+        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+            Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
+            config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked);
+            the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
+ """
+
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.nezha(
+ input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=encoder_attention_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ sequence_output = outputs[0]
+ prediction_scores = self.cls(sequence_output)
+
+ masked_lm_loss = None
+ if labels is not None:
+ loss_fct = CrossEntropyLoss() # -100 index = padding token
+ masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
+
+ if not return_dict:
+ output = (prediction_scores,) + outputs[2:]
+ return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
+
+ return MaskedLMOutput(
+ loss=masked_lm_loss,
+ logits=prediction_scores,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+ def prepare_inputs_for_generation(self, input_ids, attention_mask=None, **model_kwargs):
+ input_shape = input_ids.shape
+ effective_batch_size = input_shape[0]
+
+ # add a dummy token
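+        # (generation with an MLM head appends a padding token so the model has an extra position to predict;
+        # its attention-mask entry is set to 0 below so that other tokens do not attend to it)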
+ if self.config.pad_token_id is None:
+ raise ValueError("The PAD token should be defined for generation")
+
+ attention_mask = torch.cat([attention_mask, attention_mask.new_zeros((attention_mask.shape[0], 1))], dim=-1)
+ dummy_token = torch.full(
+ (effective_batch_size, 1), self.config.pad_token_id, dtype=torch.long, device=input_ids.device
+ )
+ input_ids = torch.cat([input_ids, dummy_token], dim=1)
+
+ return {"input_ids": input_ids, "attention_mask": attention_mask}
+
+
+@add_start_docstrings(
+ """Nezha Model with a `next sentence prediction (classification)` head on top.""",
+ NEZHA_START_DOCSTRING,
+)
+class NezhaForNextSentencePrediction(NezhaPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+
+ self.nezha = NezhaModel(config)
+ self.cls = NezhaOnlyNSPHead(config)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(NEZHA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @replace_return_docstrings(output_type=NextSentencePredictorOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ token_type_ids: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ labels: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ **kwargs,
+ ) -> Union[Tuple[torch.Tensor], NextSentencePredictorOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair
+ (see `input_ids` docstring). Indices should be in `[0, 1]`:
+
+ - 0 indicates sequence B is a continuation of sequence A,
+ - 1 indicates sequence B is a random sequence.
+
+ Returns:
+
+ Example:
+
+ ```python
+ >>> from transformers import AutoTokenizer, NezhaForNextSentencePrediction
+ >>> import torch
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("sijunhe/nezha-cn-base")
+ >>> model = NezhaForNextSentencePrediction.from_pretrained("sijunhe/nezha-cn-base")
+
+ >>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced."
+ >>> next_sentence = "The sky is blue due to the shorter wavelength of blue light."
+ >>> encoding = tokenizer(prompt, next_sentence, return_tensors="pt")
+
+ >>> outputs = model(**encoding, labels=torch.LongTensor([1]))
+ >>> logits = outputs.logits
+ >>> assert logits[0, 0] < logits[0, 1] # next sentence was random
+ ```
+ """
+
+ if "next_sentence_label" in kwargs:
+ warnings.warn(
+ "The `next_sentence_label` argument is deprecated and will be removed in a future version, use"
+ " `labels` instead.",
+ FutureWarning,
+ )
+ labels = kwargs.pop("next_sentence_label")
+
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.nezha(
+ input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ pooled_output = outputs[1]
+
+ seq_relationship_scores = self.cls(pooled_output)
+
+ next_sentence_loss = None
+ if labels is not None:
+ loss_fct = CrossEntropyLoss()
+ next_sentence_loss = loss_fct(seq_relationship_scores.view(-1, 2), labels.view(-1))
+
+ if not return_dict:
+ output = (seq_relationship_scores,) + outputs[2:]
+ return ((next_sentence_loss,) + output) if next_sentence_loss is not None else output
+
+ return NextSentencePredictorOutput(
+ loss=next_sentence_loss,
+ logits=seq_relationship_scores,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+ Nezha Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled
+ output) e.g. for GLUE tasks.
+ """,
+ NEZHA_START_DOCSTRING,
+)
+class NezhaForSequenceClassification(NezhaPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+ self.num_labels = config.num_labels
+ self.config = config
+
+ self.nezha = NezhaModel(config)
+ classifier_dropout = (
+ config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
+ )
+ self.dropout = nn.Dropout(classifier_dropout)
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(NEZHA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=SequenceClassifierOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ token_type_ids: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ labels: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple[torch.Tensor], SequenceClassifierOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.nezha(
+ input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ pooled_output = outputs[1]
+
+ pooled_output = self.dropout(pooled_output)
+ logits = self.classifier(pooled_output)
+
+ loss = None
+ if labels is not None:
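+            # Infer `problem_type` when it is not set on the config: num_labels == 1 -> regression, integer
+            # labels -> single-label classification, otherwise multi-label classification.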
+ if self.config.problem_type is None:
+ if self.num_labels == 1:
+ self.config.problem_type = "regression"
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
+ self.config.problem_type = "single_label_classification"
+ else:
+ self.config.problem_type = "multi_label_classification"
+
+ if self.config.problem_type == "regression":
+ loss_fct = MSELoss()
+ if self.num_labels == 1:
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
+ else:
+ loss = loss_fct(logits, labels)
+ elif self.config.problem_type == "single_label_classification":
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
+ elif self.config.problem_type == "multi_label_classification":
+ loss_fct = BCEWithLogitsLoss()
+ loss = loss_fct(logits, labels)
+ if not return_dict:
+ output = (logits,) + outputs[2:]
+ return ((loss,) + output) if loss is not None else output
+
+ return SequenceClassifierOutput(
+ loss=loss,
+ logits=logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+ Nezha Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
+ softmax) e.g. for RocStories/SWAG tasks.
+ """,
+ NEZHA_START_DOCSTRING,
+)
+class NezhaForMultipleChoice(NezhaPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+
+ self.nezha = NezhaModel(config)
+ classifier_dropout = (
+ config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
+ )
+ self.dropout = nn.Dropout(classifier_dropout)
+ self.classifier = nn.Linear(config.hidden_size, 1)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(NEZHA_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=MultipleChoiceModelOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ token_type_ids: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ labels: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple[torch.Tensor], MultipleChoiceModelOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
+ num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
+ `input_ids` above)
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+ num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
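+        # Flatten (batch_size, num_choices, ...) inputs to (batch_size * num_choices, ...) so that every choice
+        # is encoded independently; the per-choice logits are reshaped back to (batch_size, num_choices) below.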
+ input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
+ attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
+ token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
+ inputs_embeds = (
+ inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
+ if inputs_embeds is not None
+ else None
+ )
+
+ outputs = self.nezha(
+ input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+        pooled_output = outputs[1]
+
+        pooled_output = self.dropout(pooled_output)
+        logits = self.classifier(pooled_output)
+        reshaped_logits = logits.view(-1, num_choices)
+
+ loss = None
+ if labels is not None:
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(reshaped_logits, labels)
+
+ if not return_dict:
+ output = (reshaped_logits,) + outputs[2:]
+ return ((loss,) + output) if loss is not None else output
+
+ return MultipleChoiceModelOutput(
+ loss=loss,
+ logits=reshaped_logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+ Nezha Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
+ Named-Entity-Recognition (NER) tasks.
+ """,
+ NEZHA_START_DOCSTRING,
+)
+class NezhaForTokenClassification(NezhaPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+ self.num_labels = config.num_labels
+
+ self.nezha = NezhaModel(config, add_pooling_layer=False)
+ classifier_dropout = (
+ config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
+ )
+ self.dropout = nn.Dropout(classifier_dropout)
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(NEZHA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=TokenClassifierOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ token_type_ids: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ labels: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple[torch.Tensor], TokenClassifierOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.nezha(
+ input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ sequence_output = outputs[0]
+
+ sequence_output = self.dropout(sequence_output)
+ logits = self.classifier(sequence_output)
+
+ loss = None
+ if labels is not None:
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
+
+ if not return_dict:
+ output = (logits,) + outputs[2:]
+ return ((loss,) + output) if loss is not None else output
+
+ return TokenClassifierOutput(
+ loss=loss,
+ logits=logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+    Nezha Model with a span classification head on top for extractive question-answering tasks like SQuAD (linear
+    layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
+ """,
+ NEZHA_START_DOCSTRING,
+)
+class NezhaForQuestionAnswering(NezhaPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+ self.num_labels = config.num_labels
+
+ self.nezha = NezhaModel(config, add_pooling_layer=False)
+ self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(NEZHA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=QuestionAnsweringModelOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ token_type_ids: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ start_positions: Optional[torch.Tensor] = None,
+ end_positions: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple[torch.Tensor], QuestionAnsweringModelOutput]:
+ r"""
+        start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+            Labels for position (index) of the start of the labelled span for computing the token classification loss.
+            Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
+            are not taken into account for computing the loss.
+        end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+            Labels for position (index) of the end of the labelled span for computing the token classification loss.
+            Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
+            are not taken into account for computing the loss.
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.nezha(
+ input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ sequence_output = outputs[0]
+
+ logits = self.qa_outputs(sequence_output)
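+        # `qa_outputs` produces two scores per token (`config.num_labels` is expected to be 2); they are split
+        # into start- and end-position logits below.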
+ start_logits, end_logits = logits.split(1, dim=-1)
+ start_logits = start_logits.squeeze(-1).contiguous()
+ end_logits = end_logits.squeeze(-1).contiguous()
+
+ total_loss = None
+ if start_positions is not None and end_positions is not None:
+ # If we are on multi-GPU, split add a dimension
+ if len(start_positions.size()) > 1:
+ start_positions = start_positions.squeeze(-1)
+ if len(end_positions.size()) > 1:
+ end_positions = end_positions.squeeze(-1)
+ # sometimes the start/end positions are outside our model inputs, we ignore these terms
+ ignored_index = start_logits.size(1)
+ start_positions = start_positions.clamp(0, ignored_index)
+ end_positions = end_positions.clamp(0, ignored_index)
+
+ loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
+ start_loss = loss_fct(start_logits, start_positions)
+ end_loss = loss_fct(end_logits, end_positions)
+ total_loss = (start_loss + end_loss) / 2
+
+ if not return_dict:
+ output = (start_logits, end_logits) + outputs[2:]
+ return ((total_loss,) + output) if total_loss is not None else output
+
+ return QuestionAnsweringModelOutput(
+ loss=total_loss,
+ start_logits=start_logits,
+ end_logits=end_logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
diff --git a/venv/lib/python3.10/site-packages/transformers/models/opt/__init__.py b/venv/lib/python3.10/site-packages/transformers/models/opt/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..db1c9300824b3825c8fa752ef4599f542d148076
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/opt/__init__.py
@@ -0,0 +1,101 @@
+# Copyright 2022 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+from ...utils import (
+ OptionalDependencyNotAvailable,
+ _LazyModule,
+ is_flax_available,
+ is_tf_available,
+ is_tokenizers_available,
+ is_torch_available,
+)
+
+
+_import_structure = {"configuration_opt": ["OPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "OPTConfig"]}
+
+try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_opt"] = [
+ "OPT_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "OPTForCausalLM",
+ "OPTModel",
+ "OPTPreTrainedModel",
+ "OPTForSequenceClassification",
+ "OPTForQuestionAnswering",
+ ]
+
+try:
+ if not is_tf_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_tf_opt"] = ["TFOPTForCausalLM", "TFOPTModel", "TFOPTPreTrainedModel"]
+
+try:
+ if not is_flax_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_flax_opt"] = [
+ "FlaxOPTForCausalLM",
+ "FlaxOPTModel",
+ "FlaxOPTPreTrainedModel",
+ ]
+
+
+if TYPE_CHECKING:
+ from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_opt import (
+ OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
+ OPTForCausalLM,
+ OPTForQuestionAnswering,
+ OPTForSequenceClassification,
+ OPTModel,
+ OPTPreTrainedModel,
+ )
+
+ try:
+ if not is_tf_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
+
+ try:
+ if not is_flax_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
diff --git a/venv/lib/python3.10/site-packages/transformers/models/opt/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/opt/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4f51dd5f0c186f29cfb9111fc6222317406474b1
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/opt/__pycache__/__init__.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/opt/__pycache__/configuration_opt.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/opt/__pycache__/configuration_opt.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..739716870f8d95e0b9098bb9db496e9641200b68
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/opt/__pycache__/configuration_opt.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/opt/__pycache__/convert_opt_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/opt/__pycache__/convert_opt_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..161d529a9a2ca6ba9679403a328350ef3eb84124
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/opt/__pycache__/convert_opt_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/opt/__pycache__/modeling_flax_opt.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/opt/__pycache__/modeling_flax_opt.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f0ae601066223a9d06919472cfa4d6fffb1ee9d9
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/opt/__pycache__/modeling_flax_opt.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/opt/__pycache__/modeling_opt.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/opt/__pycache__/modeling_opt.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c5f830556b5276b30f796c6b49e1600fc56da0b0
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/opt/__pycache__/modeling_opt.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/opt/__pycache__/modeling_tf_opt.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/opt/__pycache__/modeling_tf_opt.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a8b48a785b8594d298885d945c058d3a59fa4763
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/opt/__pycache__/modeling_tf_opt.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/opt/configuration_opt.py b/venv/lib/python3.10/site-packages/transformers/models/opt/configuration_opt.py
new file mode 100644
index 0000000000000000000000000000000000000000..a9802d2ef337c85cd4f32530b14844d423823917
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/opt/configuration_opt.py
@@ -0,0 +1,142 @@
+# coding=utf-8
+# Copyright 2022 The Metaseq Authors and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" OPT model configuration"""
+from ...configuration_utils import PretrainedConfig
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+
+class OPTConfig(PretrainedConfig):
+ r"""
+ This is the configuration class to store the configuration of a [`OPTModel`]. It is used to instantiate an OPT model
+ according to the specified arguments, defining the model architecture. Instantiating a configuration with the
+ defaults will yield a similar configuration to that of the OPT
+ [facebook/opt-350m](https://huggingface.co/facebook/opt-350m) architecture.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+
+ Args:
+ vocab_size (`int`, *optional*, defaults to 50272):
+ Vocabulary size of the OPT model. Defines the number of different tokens that can be represented by the
+ `inputs_ids` passed when calling [`OPTModel`]
+ hidden_size (`int`, *optional*, defaults to 768):
+ Dimensionality of the layers and the pooler layer.
+ num_hidden_layers (`int`, *optional*, defaults to 12):
+ Number of decoder layers.
+ ffn_dim (`int`, *optional*, defaults to 3072):
+ Dimensionality of the "intermediate" (often named feed-forward) layer in decoder.
+ num_attention_heads (`int`, *optional*, defaults to 12):
+ Number of attention heads for each attention layer in the Transformer decoder.
+ activation_function (`str` or `function`, *optional*, defaults to `"relu"`):
+ The non-linear activation function (function or string) in the decoder. If string, `"gelu"`,
+ `"relu"`, `"silu"` and `"gelu_new"` are supported.
+ max_position_embeddings (`int`, *optional*, defaults to 2048):
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
+ just in case (e.g., 512 or 1024 or 2048).
+ do_layer_norm_before (`bool`, *optional*, defaults to `True`):
+ Whether to perform layer normalization before the attention block.
+ word_embed_proj_dim (`int`, *optional*):
+ `word_embed_proj_dim` can be set to down-project word embeddings, *e.g.* `opt-350m`. Defaults to
+ `hidden_size`.
+ dropout (`float`, *optional*, defaults to 0.1):
+ The dropout probability for all fully connected layers in the embeddings and decoder.
+ attention_dropout (`float`, *optional*, defaults to 0.0):
+ The dropout ratio for the attention probabilities.
+ layerdrop (`float`, *optional*, defaults to 0.0):
+ The LayerDrop probability. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556) for more
+ details.
+ init_std (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+ use_cache (`bool`, *optional*, defaults to `True`):
+ Whether or not the model should return the last key/values attentions (not used by all models).
+ enable_bias (`bool`, *optional*, defaults to `True`):
+ Whether or not the linear layers in the attention blocks should use the bias term.
+ layer_norm_elementwise_affine (`bool`, *optional*, defaults to `True`):
+ Whether or not the layer norms should have learnable parameters.
+
+ Example:
+
+ ```python
+ >>> from transformers import OPTConfig, OPTModel
+
+ >>> # Initializing an OPT facebook/opt-large style configuration
+ >>> configuration = OPTConfig()
+
+ >>> # Initializing a model (with random weights) from the facebook/opt-large style configuration
+ >>> model = OPTModel(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```"""
+
+ model_type = "opt"
+ keys_to_ignore_at_inference = ["past_key_values"]
+
+ def __init__(
+ self,
+ vocab_size=50272,
+ hidden_size=768,
+ num_hidden_layers=12,
+ ffn_dim=3072,
+ max_position_embeddings=2048,
+ do_layer_norm_before=True,
+ _remove_final_layer_norm=False,
+ word_embed_proj_dim=None,
+ dropout=0.1,
+ attention_dropout=0.0,
+ num_attention_heads=12,
+ activation_function="relu",
+ layerdrop=0.0,
+ init_std=0.02,
+ use_cache=True,
+ pad_token_id=1,
+ bos_token_id=2,
+ eos_token_id=2,
+ enable_bias=True,
+ layer_norm_elementwise_affine=True,
+ **kwargs,
+ ):
+ super().__init__(
+ pad_token_id=pad_token_id,
+ bos_token_id=bos_token_id,
+ eos_token_id=eos_token_id,
+ **kwargs,
+ )
+ self.vocab_size = vocab_size
+ self.max_position_embeddings = max_position_embeddings
+ self.num_attention_heads = num_attention_heads
+ self.word_embed_proj_dim = word_embed_proj_dim if word_embed_proj_dim is not None else hidden_size
+ self.ffn_dim = ffn_dim
+ self.hidden_size = hidden_size
+ self.num_hidden_layers = num_hidden_layers
+ self.dropout = dropout
+ self.attention_dropout = attention_dropout
+ self.activation_function = activation_function
+ self.init_std = init_std
+ self.layerdrop = layerdrop
+ self.use_cache = use_cache
+ self.do_layer_norm_before = do_layer_norm_before
+ # We keep these variables at `True` for backward compatibility.
+ self.enable_bias = enable_bias
+ self.layer_norm_elementwise_affine = layer_norm_elementwise_affine
+
+ # Note that the only purpose of `_remove_final_layer_norm` is to keep backward compatibility
+ # with checkpoints that have been fine-tuned before transformers v4.20.1
+ # see https://github.com/facebookresearch/metaseq/pull/164
+ self._remove_final_layer_norm = _remove_final_layer_norm
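+
+# Usage sketch: a configuration in the spirit of the 350m checkpoint, which down-projects word
+# embeddings via `word_embed_proj_dim` and applies layer norm after attention
+# (`do_layer_norm_before=False`). The exact hyper-parameters below are illustrative only.
+#
+#     >>> config = OPTConfig(
+#     ...     hidden_size=1024,
+#     ...     num_attention_heads=16,
+#     ...     num_hidden_layers=24,
+#     ...     ffn_dim=4096,
+#     ...     word_embed_proj_dim=512,
+#     ...     do_layer_norm_before=False,
+#     ... )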
diff --git a/venv/lib/python3.10/site-packages/transformers/models/opt/convert_opt_original_pytorch_checkpoint_to_pytorch.py b/venv/lib/python3.10/site-packages/transformers/models/opt/convert_opt_original_pytorch_checkpoint_to_pytorch.py
new file mode 100644
index 0000000000000000000000000000000000000000..3f302b2ec3f44c86c81b0452951e9b9e894a2713
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/opt/convert_opt_original_pytorch_checkpoint_to_pytorch.py
@@ -0,0 +1,114 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Convert OPT checkpoint."""
+
+
+import argparse
+from pathlib import Path
+
+import torch
+
+from transformers import OPTConfig, OPTModel
+from transformers.utils import logging
+
+
+logging.set_verbosity_info()
+logger = logging.get_logger(__name__)
+
+
+def load_checkpoint(checkpoint_path):
+ """Checkpoint path should end in model.pt"""
+ sd = torch.load(checkpoint_path, map_location="cpu")
+ if "model" in sd.keys():
+ sd = sd["model"]
+
+ # pop unnecessary weights
+ keys_to_delete = [
+ "decoder.version",
+ "decoder.output_projection.weight",
+ ]
+ for key in keys_to_delete:
+ if key in sd:
+ sd.pop(key)
+
+ keys_to_rename = {
+ "decoder.project_in_dim.weight": "decoder.project_in.weight",
+ "decoder.project_out_dim.weight": "decoder.project_out.weight",
+ "decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
+ "decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
+ }
+ for old_key, new_key in keys_to_rename.items():
+ if old_key in sd:
+ sd[new_key] = sd.pop(old_key)
+
+ keys = list(sd.keys())
+ for key in keys:
+ if ".qkv_proj." in key:
+ value = sd[key]
+ # We split the fused QKV weight into separate Q, K, V projections
+
+ q_name = key.replace(".qkv_proj.", ".q_proj.")
+ k_name = key.replace(".qkv_proj.", ".k_proj.")
+ v_name = key.replace(".qkv_proj.", ".v_proj.")
+
+ depth = value.shape[0]
+ assert depth % 3 == 0
+ # In `SequeuceParallelTransformerBlock`, the fused QKV weight is stored as K, V, Q despite the naming:
+ # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
+ k, v, q = torch.split(value, depth // 3, dim=0)
+
+ sd[q_name] = q
+ sd[k_name] = k
+ sd[v_name] = v
+ del sd[key]
+
+ return sd
+
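+# Worked sketch of the split above: a fused weight of shape (3 * d, d) is cut into three
+# (d, d) chunks which, per the metaseq layout referenced in `load_checkpoint`, map to the
+# K, V and Q projections in that order (not Q, K, V).
+#
+#     >>> fused = torch.arange(12, dtype=torch.float32).reshape(6, 2)  # d = 2
+#     >>> k, v, q = torch.split(fused, 2, dim=0)
+#     >>> [t.shape for t in (k, v, q)]
+#     [torch.Size([2, 2]), torch.Size([2, 2]), torch.Size([2, 2])]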
+
+@torch.no_grad()
+def convert_opt_checkpoint(checkpoint_path, pytorch_dump_folder_path, config=None):
+ """
+ Copy/paste/tweak model's weights to our OPT structure.
+ """
+ state_dict = load_checkpoint(checkpoint_path)
+
+ if config is not None:
+ config = OPTConfig.from_pretrained(config)
+ else:
+ config = OPTConfig()
+
+ model = OPTModel(config).half().eval()
+ model.load_state_dict(state_dict)
+
+ # Check results
+ Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
+ model.save_pretrained(pytorch_dump_folder_path)
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ # Required parameters
+ parser.add_argument(
+ "--fairseq_path",
+ type=str,
+ help=(
+ "path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:"
+ " https://huggingface.co/models?other=opt_metasq"
+ ),
+ )
+ parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
+ parser.add_argument("--hf_config", default=None, type=str, help="Define HF config.")
+ args = parser.parse_args()
+ convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
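+
+# Example invocation (paths and config name below are placeholders):
+#   python convert_opt_original_pytorch_checkpoint_to_pytorch.py \
+#       --fairseq_path /path/to/model.pt \
+#       --pytorch_dump_folder_path ./opt-hf \
+#       --hf_config facebook/opt-350m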
diff --git a/venv/lib/python3.10/site-packages/transformers/models/opt/modeling_flax_opt.py b/venv/lib/python3.10/site-packages/transformers/models/opt/modeling_flax_opt.py
new file mode 100644
index 0000000000000000000000000000000000000000..5d9839f1204860b72ced4e1573ea4b8d0a8fac8a
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/opt/modeling_flax_opt.py
@@ -0,0 +1,799 @@
+# coding=utf-8
+# Copyright 2022 The Fairseq Authors and The Google Flax Team Authors And The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" Flax OPT model."""
+
+from functools import partial
+from typing import Optional, Tuple
+
+import flax.linen as nn
+import jax
+import jax.numpy as jnp
+from flax.core.frozen_dict import FrozenDict, freeze, unfreeze
+from flax.linen import combine_masks, make_causal_mask
+from flax.linen.attention import dot_product_attention_weights
+from flax.traverse_util import flatten_dict, unflatten_dict
+from jax import lax
+from jax.random import PRNGKey
+
+from ...modeling_flax_outputs import FlaxBaseModelOutput, FlaxMaskedLMOutput
+from ...modeling_flax_utils import ACT2FN, FlaxPreTrainedModel, append_call_sample_docstring
+from ...utils import add_start_docstrings, logging
+from .configuration_opt import OPTConfig
+
+
+logger = logging.get_logger(__name__)
+
+_CHECKPOINT_FOR_DOC = "facebook/opt-350m"
+_CONFIG_FOR_DOC = "OPTConfig"
+
+
+OPT_START_DOCSTRING = r"""
+ This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
+ etc.)
+
+ This model is also a Flax Linen
+ [flax.nn.Module](https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html) subclass. Use it as a
+ regular Flax Module and refer to the Flax documentation for all matters related to general usage and behavior.
+
+ Finally, this model supports inherent JAX features such as:
+
+ - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit)
+ - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation)
+ - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap)
+ - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap)
+
+ Parameters:
+ config ([`OPTConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~FlaxPreTrainedModel.from_pretrained`] method to load the model weights.
+ dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`):
+ The data type of the computation. Can be one of `jax.numpy.float32`, `jax.numpy.float16` (on GPUs) and
+ `jax.numpy.bfloat16` (on TPUs).
+
+ This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If
+ specified all the computation will be performed with the given `dtype`.
+
+ **Note that this only specifies the dtype of the computation and does not influence the dtype of model
+ parameters.**
+
+ If you wish to change the dtype of the model parameters, see [`~FlaxPreTrainedModel.to_fp16`] and
+ [`~FlaxPreTrainedModel.to_bf16`].
+"""
+
+OPT_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
+ it.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
+ config.max_position_embeddings - 1]`.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+# Copied from transformers.models.bart.modeling_flax_bart.FlaxBartAttention with Bart->OPT
+class FlaxOPTAttention(nn.Module):
+ config: OPTConfig
+ embed_dim: int
+ num_heads: int
+ dropout: float = 0.0
+ causal: bool = False
+ bias: bool = True
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+
+ def setup(self) -> None:
+ self.head_dim = self.embed_dim // self.num_heads
+ if self.head_dim * self.num_heads != self.embed_dim:
+ raise ValueError(
+ f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
+ f" and `num_heads`: {self.num_heads})."
+ )
+
+ dense = partial(
+ nn.Dense,
+ self.embed_dim,
+ use_bias=self.bias,
+ dtype=self.dtype,
+ kernel_init=jax.nn.initializers.normal(self.config.init_std),
+ )
+
+ self.q_proj, self.k_proj, self.v_proj = dense(), dense(), dense()
+ self.out_proj = dense()
+
+ self.dropout_layer = nn.Dropout(rate=self.dropout)
+
+ if self.causal:
+ self.causal_mask = make_causal_mask(
+ jnp.ones((1, self.config.max_position_embeddings), dtype="bool"), dtype="bool"
+ )
+
+ def _split_heads(self, hidden_states):
+ return hidden_states.reshape(hidden_states.shape[:2] + (self.num_heads, self.head_dim))
+
+ def _merge_heads(self, hidden_states):
+ return hidden_states.reshape(hidden_states.shape[:2] + (self.embed_dim,))
+
+ @nn.compact
+ def _concatenate_to_cache(self, key, value, query, attention_mask):
+ """
+ This function takes projected key, value states from a single input token and concatenates the states to cached
+ states from previous steps. This function is slightly adapted from the official Flax repository:
+ https://github.com/google/flax/blob/491ce18759622506588784b4fca0e4bf05f8c8cd/flax/linen/attention.py#L252
+ """
+ # detect if we're initializing by absence of existing cache data.
+ is_initialized = self.has_variable("cache", "cached_key")
+ cached_key = self.variable("cache", "cached_key", jnp.zeros, key.shape, key.dtype)
+ cached_value = self.variable("cache", "cached_value", jnp.zeros, value.shape, value.dtype)
+ cache_index = self.variable("cache", "cache_index", lambda: jnp.array(0, dtype=jnp.int32))
+
+ if is_initialized:
+ *batch_dims, max_length, num_heads, depth_per_head = cached_key.value.shape
+ # update key, value caches with our new 1d spatial slices
+ cur_index = cache_index.value
+ indices = (0,) * len(batch_dims) + (cur_index, 0, 0)
+ key = lax.dynamic_update_slice(cached_key.value, key, indices)
+ value = lax.dynamic_update_slice(cached_value.value, value, indices)
+ cached_key.value = key
+ cached_value.value = value
+ num_updated_cache_vectors = query.shape[1]
+ cache_index.value = cache_index.value + num_updated_cache_vectors
+ # causal mask for cached decoder self-attention: our single query position should only attend to those key positions that have already been generated and cached, not the remaining zero elements.
+ pad_mask = jnp.broadcast_to(
+ jnp.arange(max_length) < cur_index + num_updated_cache_vectors,
+ tuple(batch_dims) + (1, num_updated_cache_vectors, max_length),
+ )
+ attention_mask = combine_masks(pad_mask, attention_mask)
+ return key, value, attention_mask
+
+ def __call__(
+ self,
+ hidden_states: jnp.ndarray,
+ key_value_states: Optional[jnp.ndarray] = None,
+ attention_mask: Optional[jnp.ndarray] = None,
+ init_cache: bool = False,
+ deterministic: bool = True,
+ ) -> Tuple[jnp.ndarray]:
+ """Input shape: Batch x Time x Channel"""
+
+ # if key_value_states are provided this layer is used as a cross-attention layer
+ # for the decoder
+ is_cross_attention = key_value_states is not None
+ batch_size = hidden_states.shape[0]
+
+ # get query proj
+ query_states = self.q_proj(hidden_states)
+ # get key, value proj
+ if is_cross_attention:
+ # cross_attentions
+ key_states = self.k_proj(key_value_states)
+ value_states = self.v_proj(key_value_states)
+ else:
+ # self_attention
+ key_states = self.k_proj(hidden_states)
+ value_states = self.v_proj(hidden_states)
+
+ query_states = self._split_heads(query_states)
+ key_states = self._split_heads(key_states)
+ value_states = self._split_heads(value_states)
+
+ # handle the cache and prepare the causal attention mask
+ if self.causal:
+ query_length, key_length = query_states.shape[1], key_states.shape[1]
+ if self.has_variable("cache", "cached_key"):
+ mask_shift = self.variables["cache"]["cache_index"]
+ max_decoder_length = self.variables["cache"]["cached_key"].shape[1]
+ causal_mask = lax.dynamic_slice(
+ self.causal_mask, (0, 0, mask_shift, 0), (1, 1, query_length, max_decoder_length)
+ )
+ else:
+ causal_mask = self.causal_mask[:, :, :query_length, :key_length]
+ causal_mask = jnp.broadcast_to(causal_mask, (batch_size,) + causal_mask.shape[1:])
+
+ # combine masks if needed
+ if attention_mask is not None and self.causal:
+ attention_mask = jnp.broadcast_to(jnp.expand_dims(attention_mask, axis=(-3, -2)), causal_mask.shape)
+ attention_mask = combine_masks(attention_mask, causal_mask)
+ elif self.causal:
+ attention_mask = causal_mask
+ elif attention_mask is not None:
+ attention_mask = jnp.expand_dims(attention_mask, axis=(-3, -2))
+
+ # During fast autoregressive decoding, we feed one position at a time,
+ # and cache the keys and values step by step.
+ if self.causal and (self.has_variable("cache", "cached_key") or init_cache):
+ key_states, value_states, attention_mask = self._concatenate_to_cache(
+ key_states, value_states, query_states, attention_mask
+ )
+
+ # Convert the boolean attention mask to an attention bias.
+ if attention_mask is not None:
+ # attention mask in the form of attention bias
+ attention_bias = lax.select(
+ attention_mask > 0,
+ jnp.full(attention_mask.shape, 0.0).astype(self.dtype),
+ jnp.full(attention_mask.shape, jnp.finfo(self.dtype).min).astype(self.dtype),
+ )
+ else:
+ attention_bias = None
+
+ dropout_rng = None
+ if not deterministic and self.dropout > 0.0:
+ dropout_rng = self.make_rng("dropout")
+
+ attn_weights = dot_product_attention_weights(
+ query_states,
+ key_states,
+ bias=attention_bias,
+ dropout_rng=dropout_rng,
+ dropout_rate=self.dropout,
+ broadcast_dropout=True,
+ deterministic=deterministic,
+ dtype=self.dtype,
+ precision=None,
+ )
+
+ attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value_states)
+ attn_output = self._merge_heads(attn_output)
+ attn_output = self.out_proj(attn_output)
+
+ return attn_output, attn_weights
+
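+# Sketch of the mask handling above: the boolean/0-1 attention mask is converted into an
+# additive bias (0.0 where attending is allowed, a very large negative number where it is
+# not), which the softmax inside `dot_product_attention_weights` then drives to ~0.
+#
+#     >>> mask = jnp.array([[1, 1, 0]])
+#     >>> jnp.where(mask > 0, 0.0, jnp.finfo(jnp.float32).min)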
+
+class FlaxOPTDecoderLayer(nn.Module):
+ config: OPTConfig
+ dtype: jnp.dtype = jnp.float32
+
+ def setup(self) -> None:
+ self.embed_dim = self.config.hidden_size
+ self.self_attn = FlaxOPTAttention(
+ config=self.config,
+ embed_dim=self.embed_dim,
+ num_heads=self.config.num_attention_heads,
+ dropout=self.config.attention_dropout,
+ causal=True,
+ dtype=self.dtype,
+ )
+ self.do_layer_norm_before = self.config.do_layer_norm_before
+ self.dropout_layer = nn.Dropout(rate=self.config.dropout)
+ self.activation_fn = ACT2FN[self.config.activation_function]
+
+ self.self_attn_layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)
+ self.fc1 = nn.Dense(
+ self.config.ffn_dim,
+ dtype=self.dtype,
+ kernel_init=jax.nn.initializers.normal(self.config.init_std),
+ )
+ self.fc2 = nn.Dense(
+ self.embed_dim, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.init_std)
+ )
+ self.final_layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)
+
+ def __call__(
+ self,
+ hidden_states: jnp.ndarray,
+ attention_mask: jnp.ndarray,
+ init_cache: bool = False,
+ output_attentions: bool = True,
+ deterministic: bool = True,
+ ) -> Tuple[jnp.ndarray]:
+ residual = hidden_states
+
+ # 125m, 1.7B, ..., 175B applies layer norm BEFORE attention
+ if self.do_layer_norm_before:
+ hidden_states = self.self_attn_layer_norm(hidden_states)
+
+ # Self Attention
+ hidden_states, self_attn_weights = self.self_attn(
+ hidden_states=hidden_states,
+ attention_mask=attention_mask,
+ init_cache=init_cache,
+ deterministic=deterministic,
+ )
+ hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic)
+ hidden_states = residual + hidden_states
+ # 350m applies layer norm AFTER attention
+ if not self.do_layer_norm_before:
+ hidden_states = self.self_attn_layer_norm(hidden_states)
+
+ # Fully Connected
+ hidden_states_shape = hidden_states.shape
+ hidden_states = hidden_states.reshape(-1, hidden_states.shape[-1])
+ residual = hidden_states
+
+ # 125m, 1.7B, ..., 175B applies layer norm BEFORE the feed-forward block
+ if self.do_layer_norm_before:
+ hidden_states = self.final_layer_norm(hidden_states)
+
+ hidden_states = self.fc1(hidden_states)
+ hidden_states = self.activation_fn(hidden_states)
+
+ hidden_states = self.fc2(hidden_states)
+ hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic)
+
+ hidden_states = (residual + hidden_states).reshape(hidden_states_shape)
+
+ # 350m applies layer norm AFTER the feed-forward block
+ if not self.do_layer_norm_before:
+ hidden_states = self.final_layer_norm(hidden_states)
+
+ outputs = (hidden_states,)
+
+ if output_attentions:
+ outputs += (self_attn_weights,)
+
+ return outputs
+
+
+class FlaxOPTDecoderLayerCollection(nn.Module):
+ config: OPTConfig
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+
+ def setup(self):
+ self.layers = [
+ FlaxOPTDecoderLayer(self.config, name=str(i), dtype=self.dtype)
+ for i in range(self.config.num_hidden_layers)
+ ]
+ self.layerdrop = self.config.layerdrop
+
+ def __call__(
+ self,
+ hidden_states,
+ attention_mask,
+ deterministic: bool = True,
+ init_cache: bool = False,
+ output_attentions: bool = False,
+ output_hidden_states: bool = False,
+ ):
+ # decoder layers
+ all_hidden_states = () if output_hidden_states else None
+ all_self_attns = () if output_attentions else None
+
+ for decoder_layer in self.layers:
+ if output_hidden_states:
+ all_hidden_states += (hidden_states,)
+
+ layer_outputs = decoder_layer(
+ hidden_states,
+ attention_mask=attention_mask,
+ init_cache=init_cache,
+ output_attentions=output_attentions,
+ deterministic=deterministic,
+ )
+
+ hidden_states = layer_outputs[0]
+ if output_attentions:
+ all_self_attns += (layer_outputs[1],)
+
+ outputs = [hidden_states, all_hidden_states, all_self_attns]
+ return outputs
+
+
+class FlaxOPTLearnedPositionalEmbedding(nn.Embed):
+ """
+ This module learns positional embeddings up to a fixed maximum size.
+ """
+
+ def setup(self):
+ self.offset = 2
+ self.embedding = self.param(
+ "embedding", self.embedding_init, (self.num_embeddings + self.offset, self.features), self.param_dtype
+ )
+
+ def __call__(self, positions):
+ """`input_ids_shape` is expected to be [bsz x seqlen]."""
+
+ return super().__call__(positions + self.offset)
+
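+# Sketch of the offset above: OPT reserves the first two rows of the position table, so
+# logical positions [0, 1, 2] actually index embedding rows [2, 3, 4] (hence the table is
+# created with `num_embeddings + self.offset` rows).
+#
+#     >>> positions = jnp.array([[0, 1, 2]])
+#     >>> positions + 2  # rows looked up in the embedding matrix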
+
+class FlaxOPTDecoder(nn.Module):
+ config: OPTConfig
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+ offset: int = 2
+
+ def setup(self):
+ self.dropout_layer = nn.Dropout(rate=self.config.dropout)
+
+ embed_dim = self.config.hidden_size
+ self.padding_idx = self.config.pad_token_id
+ self.max_target_positions = self.config.max_position_embeddings
+
+ self.embed_tokens = nn.Embed(
+ self.config.vocab_size,
+ self.config.word_embed_proj_dim,
+ embedding_init=jax.nn.initializers.normal(self.config.init_std),
+ dtype=self.dtype,
+ )
+
+ self.embed_positions = FlaxOPTLearnedPositionalEmbedding(
+ self.config.max_position_embeddings,
+ embed_dim,
+ embedding_init=jax.nn.initializers.normal(self.config.init_std),
+ dtype=self.dtype,
+ )
+
+ if self.config.word_embed_proj_dim != self.config.hidden_size:
+ self.project_in = nn.Dense(self.config.hidden_size, use_bias=False)
+ self.project_out = nn.Dense(self.config.word_embed_proj_dim, use_bias=False)
+
+ else:
+ self.project_in = None
+ self.project_out = None
+
+ # Note that the only purpose of `config._remove_final_layer_norm` is to keep backward compatibility
+ # with checkpoints that have been fine-tuned before transformers v4.20.1
+ # see https://github.com/facebookresearch/metaseq/pull/164
+ if self.config.do_layer_norm_before and not self.config._remove_final_layer_norm:
+ self.final_layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)
+ else:
+ self.final_layer_norm = None
+
+ self.layers = FlaxOPTDecoderLayerCollection(self.config, self.dtype)
+
+ def __call__(
+ self,
+ input_ids,
+ attention_mask,
+ position_ids,
+ init_cache: bool = False,
+ output_attentions: bool = False,
+ output_hidden_states: bool = False,
+ return_dict: bool = True,
+ deterministic: bool = True,
+ ):
+ input_shape = input_ids.shape
+ input_ids = input_ids.reshape(-1, input_shape[-1])
+
+ inputs_embeds = self.embed_tokens(input_ids)
+ if self.project_in is not None:
+ inputs_embeds = self.project_in(inputs_embeds)
+
+ positions = self.embed_positions(position_ids)
+
+ hidden_states = inputs_embeds + positions
+
+ hidden_state, all_hidden_states, attentions = self.layers(
+ hidden_states,
+ attention_mask,
+ deterministic=deterministic,
+ init_cache=init_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ )
+
+ if self.final_layer_norm is not None:
+ hidden_state = self.final_layer_norm(hidden_state)
+
+ if self.project_out is not None:
+ hidden_state = self.project_out(hidden_state)
+
+ if output_hidden_states:
+ all_hidden_states += (hidden_state,)
+
+ outputs = [hidden_state, all_hidden_states, attentions]
+
+ if not return_dict:
+ return tuple(v for v in outputs if v is not None)
+
+ return FlaxBaseModelOutput(
+ last_hidden_state=hidden_state,
+ hidden_states=all_hidden_states,
+ attentions=attentions,
+ )
+
+
+class FlaxOPTPreTrainedModel(FlaxPreTrainedModel):
+ config_class = OPTConfig
+ base_model_prefix: str = "model"
+ module_class: nn.Module = None
+
+ def __init__(
+ self,
+ config: OPTConfig,
+ input_shape: Tuple[int] = (1, 1),
+ seed: int = 0,
+ dtype: jnp.dtype = jnp.float32,
+ _do_init: bool = True,
+ **kwargs,
+ ):
+ module = self.module_class(config=config, dtype=dtype, **kwargs)
+ super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)
+
+ def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
+ # init input tensors
+ input_ids = jnp.zeros(input_shape, dtype="i4")
+ attention_mask = jnp.ones_like(input_ids)
+
+ batch_size, sequence_length = input_ids.shape
+ position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length))
+
+ params_rng, dropout_rng = jax.random.split(rng)
+ rngs = {"params": params_rng, "dropout": dropout_rng}
+
+ module_init_outputs = self.module.init(
+ rngs,
+ input_ids,
+ attention_mask,
+ position_ids,
+ return_dict=False,
+ )
+
+ random_params = module_init_outputs["params"]
+ if params is not None:
+ random_params = flatten_dict(unfreeze(random_params))
+ params = flatten_dict(unfreeze(params))
+ for missing_key in self._missing_keys:
+ params[missing_key] = random_params[missing_key]
+ self._missing_keys = set()
+ return freeze(unflatten_dict(params))
+ else:
+ return random_params
+
+ def init_cache(self, batch_size, max_length):
+ r"""
+ Args:
+ batch_size (`int`):
+ batch_size used for fast auto-regressive decoding. Defines the batch size of the initialized cache.
+ max_length (`int`):
+ maximum possible length for auto-regressive decoding. Defines the sequence length of the initialized
+ cache.
+ """
+ # init input variables to retrieve cache
+ input_ids = jnp.ones((batch_size, max_length), dtype="i4")
+ attention_mask = jnp.ones_like(input_ids, dtype="i4")
+ position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape)
+
+ init_variables = self.module.init(
+ jax.random.PRNGKey(0), input_ids, attention_mask, position_ids, return_dict=False, init_cache=True
+ )
+ return unfreeze(init_variables["cache"])
+
+ def __call__(
+ self,
+ input_ids: jnp.ndarray,
+ attention_mask: Optional[jnp.ndarray] = None,
+ position_ids: Optional[jnp.ndarray] = None,
+ params: dict = None,
+ past_key_values: dict = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ dropout_rng: PRNGKey = None,
+ deterministic: bool = True,
+ ):
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
+
+ if attention_mask is None:
+ attention_mask = jnp.ones_like(input_ids)
+
+ if position_ids is None:
+ position_ids = (attention_mask.cumsum(axis=1) * attention_mask) - 1
+
+ # Handle any PRNG if needed
+ rngs = {"dropout": dropout_rng} if dropout_rng is not None else {}
+
+ inputs = {"params": params or self.params}
+
+ # if past_key_values are passed then cache is already initialized a private flag init_cache has to be passed
+ # down to ensure cache is used. It has to be made sure that cache is marked as mutable so that it can be
+ # changed by FlaxOPTAttention module
+ if past_key_values:
+ inputs["cache"] = past_key_values
+ mutable = ["cache"]
+ else:
+ mutable = False
+
+ outputs = self.module.apply(
+ inputs,
+ input_ids=jnp.array(input_ids, dtype="i4"),
+ attention_mask=jnp.array(attention_mask, dtype="i4"),
+ position_ids=jnp.array(position_ids, dtype="i4"),
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ deterministic=deterministic,
+ rngs=rngs,
+ mutable=mutable,
+ )
+
+ # add updated cache to model output
+ if past_key_values is not None and return_dict:
+ outputs, past_key_values = outputs
+ outputs["past_key_values"] = unfreeze(past_key_values["cache"])
+ return outputs
+ elif past_key_values is not None and not return_dict:
+ outputs, past_key_values = outputs
+ outputs = outputs[:1] + (unfreeze(past_key_values["cache"]),) + outputs[1:]
+
+ return outputs
+
+
+class FlaxOPTModule(nn.Module):
+ config: OPTConfig
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+
+ def setup(self):
+ self.decoder = FlaxOPTDecoder(self.config, dtype=self.dtype)
+
+ def _get_decoder_module(self):
+ return self.decoder
+
+ def __call__(
+ self,
+ input_ids,
+ attention_mask,
+ position_ids,
+ output_attentions: bool = False,
+ output_hidden_states: bool = False,
+ return_dict: bool = True,
+ deterministic: bool = True,
+ init_cache=False,
+ ):
+ decoder_outputs = self.decoder(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ deterministic=deterministic,
+ init_cache=init_cache,
+ )
+
+ if not return_dict:
+ return decoder_outputs
+
+ return FlaxBaseModelOutput(
+ last_hidden_state=decoder_outputs.last_hidden_state,
+ hidden_states=decoder_outputs.hidden_states,
+ attentions=decoder_outputs.attentions,
+ )
+
+
+# Copied from transformers.models.bart.modeling_flax_bart.FlaxBartModel with Bart->OPT
+class FlaxOPTModel(FlaxOPTPreTrainedModel):
+ config: OPTConfig
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+ module_class = FlaxOPTModule
+
+
+append_call_sample_docstring(FlaxOPTModel, _CHECKPOINT_FOR_DOC, FlaxBaseModelOutput, _CONFIG_FOR_DOC)
+
+
+@add_start_docstrings(
+ "The bare OPT Model transformer outputting raw hidden-states without any specific head on top.",
+ OPT_START_DOCSTRING,
+)
+class FlaxOPTForCausalLMModule(nn.Module):
+ config: OPTConfig
+ dtype: jnp.dtype = jnp.float32
+
+ def setup(self):
+ self.model = FlaxOPTModule(config=self.config, dtype=self.dtype)
+ self.lm_head = nn.Dense(
+ self.config.vocab_size,
+ use_bias=False,
+ dtype=self.dtype,
+ kernel_init=jax.nn.initializers.normal(self.config.init_std),
+ )
+
+ def __call__(
+ self,
+ input_ids,
+ attention_mask,
+ position_ids,
+ init_cache: bool = False,
+ output_attentions: bool = False,
+ output_hidden_states: bool = False,
+ return_dict: bool = True,
+ deterministic: bool = True,
+ ):
+ outputs = self.model(
+ input_ids,
+ attention_mask,
+ position_ids,
+ init_cache=init_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ deterministic=deterministic,
+ )
+
+ hidden_states = outputs[0]
+
+ if self.config.tie_word_embeddings:
+ shared_embedding = self.model.variables["params"]["decoder"]["embed_tokens"]["embedding"]
+ lm_logits = self.lm_head.apply({"params": {"kernel": shared_embedding.T}}, hidden_states)
+ else:
+ lm_logits = self.lm_head(hidden_states)
+
+ if not return_dict:
+ return (lm_logits,) + outputs[1:]
+
+ return FlaxMaskedLMOutput(
+ logits=lm_logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+ OPT Model with a language modeling head on top (linear layer with weights tied to the input embeddings) e.g for
+ autoregressive tasks.
+ """,
+ OPT_START_DOCSTRING,
+)
+class FlaxOPTForCausalLM(FlaxOPTPreTrainedModel):
+ module_class = FlaxOPTForCausalLMModule
+
+ def prepare_inputs_for_generation(self, input_ids, max_length, attention_mask: Optional[jax.Array] = None):
+ # initializing the cache
+ batch_size, seq_length = input_ids.shape
+
+ past_key_values = self.init_cache(batch_size, max_length)
+ # Note that usually one would have to put 0's in the attention_mask for x > input_ids.shape[-1] and x < cache_length.
+ # But since the decoder uses a causal mask, those positions are masked anyway.
+ # Thus, we can create a single static attention_mask here, which is more efficient for compilation
+ extended_attention_mask = jnp.ones((batch_size, max_length), dtype="i4")
+
+ if attention_mask is not None:
+ position_ids = attention_mask.cumsum(axis=1) - 1
+ extended_attention_mask = lax.dynamic_update_slice(extended_attention_mask, attention_mask, (0, 0))
+ else:
+ position_ids = jnp.broadcast_to(jnp.arange(seq_length, dtype="i4")[None, :], (batch_size, seq_length))
+
+ return {
+ "past_key_values": past_key_values,
+ "attention_mask": extended_attention_mask,
+ "position_ids": position_ids,
+ }
+
+ def update_inputs_for_generation(self, model_outputs, model_kwargs):
+ model_kwargs["past_key_values"] = model_outputs.past_key_values
+ model_kwargs["position_ids"] = model_kwargs["position_ids"][:, -1:] + 1
+ return model_kwargs
+
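+# Usage sketch for generation (checkpoint name is illustrative; `from_pt=True` may be needed
+# if only PyTorch weights exist for the chosen checkpoint):
+#
+#     >>> from transformers import AutoTokenizer, FlaxOPTForCausalLM
+#     >>> tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m")
+#     >>> model = FlaxOPTForCausalLM.from_pretrained("facebook/opt-350m")
+#     >>> inputs = tokenizer("Hello, my dog is", return_tensors="np")
+#     >>> outputs = model.generate(**inputs, max_length=16)
+#     >>> tokenizer.batch_decode(outputs.sequences, skip_special_tokens=True)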
+
+append_call_sample_docstring(
+ FlaxOPTForCausalLM,
+ _CHECKPOINT_FOR_DOC,
+ FlaxBaseModelOutput,
+ _CONFIG_FOR_DOC,
+)
diff --git a/venv/lib/python3.10/site-packages/transformers/models/opt/modeling_opt.py b/venv/lib/python3.10/site-packages/transformers/models/opt/modeling_opt.py
new file mode 100644
index 0000000000000000000000000000000000000000..5e9e53a2ac3251cc4dabd0306293f362c9c92bab
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/opt/modeling_opt.py
@@ -0,0 +1,1456 @@
+# coding=utf-8
+# Copyright 2022 The Fairseq Authors and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" PyTorch OPT model."""
+from typing import List, Optional, Tuple, Union
+
+import torch
+import torch.nn.functional as F
+import torch.utils.checkpoint
+from torch import nn
+from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
+
+from ...activations import ACT2FN
+from ...modeling_attn_mask_utils import _prepare_4d_causal_attention_mask
+from ...modeling_outputs import (
+ BaseModelOutputWithPast,
+ CausalLMOutputWithPast,
+ QuestionAnsweringModelOutput,
+ SequenceClassifierOutputWithPast,
+)
+from ...modeling_utils import PreTrainedModel
+from ...utils import (
+ add_code_sample_docstrings,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ is_flash_attn_2_available,
+ is_flash_attn_greater_or_equal_2_10,
+ logging,
+ replace_return_docstrings,
+)
+from .configuration_opt import OPTConfig
+
+
+if is_flash_attn_2_available():
+ from flash_attn import flash_attn_func, flash_attn_varlen_func
+ from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input # noqa
+
+
+logger = logging.get_logger(__name__)
+
+_CHECKPOINT_FOR_DOC = "facebook/opt-350m"
+_CONFIG_FOR_DOC = "OPTConfig"
+
+# Base model docstring
+_EXPECTED_OUTPUT_SHAPE = [1, 8, 1024]
+
+# SequenceClassification docstring
+_CHECKPOINT_FOR_SEQUENCE_CLASSIFICATION = "ArthurZ/opt-350m-dummy-sc"
+_SEQ_CLASS_EXPECTED_LOSS = 1.71
+_SEQ_CLASS_EXPECTED_OUTPUT = "'LABEL_0'"
+
+
+from ..deprecated._archive_maps import OPT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
+
+
+# Copied from transformers.models.llama.modeling_llama._get_unpad_data
+def _get_unpad_data(attention_mask):
+ seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
+ indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
+ max_seqlen_in_batch = seqlens_in_batch.max().item()
+ cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
+ return (
+ indices,
+ cu_seqlens,
+ max_seqlen_in_batch,
+ )
+
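+# Worked sketch for the helper above: for a padding mask with rows holding 2 and 3 real
+# tokens, the flattened indices of the real tokens, the cumulative sequence lengths and the
+# longest row come out as follows.
+#
+#     >>> mask = torch.tensor([[1, 1, 0], [1, 1, 1]])
+#     >>> _get_unpad_data(mask)
+#     (tensor([0, 1, 3, 4, 5]), tensor([0, 2, 5], dtype=torch.int32), 3)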
+
+class OPTLearnedPositionalEmbedding(nn.Embedding):
+ """
+ This module learns positional embeddings up to a fixed maximum size.
+ """
+
+ def __init__(self, num_embeddings: int, embedding_dim: int):
+ # OPT is set up so that if padding_idx is specified then offset the embedding ids by 2
+ # and adjust num_embeddings appropriately. Other models don't have this hack
+ self.offset = 2
+ super().__init__(num_embeddings + self.offset, embedding_dim)
+
+ def forward(self, attention_mask: torch.LongTensor, past_key_values_length: int = 0):
+ """`input_ids_shape` is expected to be [bsz x seqlen]."""
+ attention_mask = attention_mask.long()
+
+ # create positions depending on attention_mask
+ positions = (torch.cumsum(attention_mask, dim=1).type_as(attention_mask) * attention_mask).long() - 1
+
+ # cut positions if `past_key_values_length` is > 0
+ positions = positions[:, past_key_values_length:]
+
+ return super().forward(positions + self.offset)
+
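+# Worked sketch of the position computation above: left-padded rows get position -1 on the
+# padding tokens and start counting from the first real token.
+#
+#     >>> mask = torch.tensor([[0, 0, 1, 1], [1, 1, 1, 1]])
+#     >>> (torch.cumsum(mask, dim=1).type_as(mask) * mask) - 1
+#     tensor([[-1, -1,  0,  1],
+#             [ 0,  1,  2,  3]])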
+
+class OPTAttention(nn.Module):
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
+
+ def __init__(
+ self,
+ config: OPTConfig,
+ is_decoder: bool = False,
+ **kwargs,
+ ):
+ super().__init__()
+ self.config = config
+ self.embed_dim = config.hidden_size
+ self.num_heads = config.num_attention_heads
+ self.dropout = config.attention_dropout
+ self.enable_bias = config.enable_bias
+
+ self.head_dim = self.embed_dim // self.num_heads
+ self.is_causal = True
+
+ if (self.head_dim * self.num_heads) != self.embed_dim:
+ raise ValueError(
+ f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
+ f" and `num_heads`: {self.num_heads})."
+ )
+ self.scaling = self.head_dim**-0.5
+ self.is_decoder = is_decoder
+
+ self.k_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=self.enable_bias)
+ self.v_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=self.enable_bias)
+ self.q_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=self.enable_bias)
+ self.out_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=self.enable_bias)
+
+ def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
+ return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ key_value_states: Optional[torch.Tensor] = None,
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ layer_head_mask: Optional[torch.Tensor] = None,
+ output_attentions: bool = False,
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
+ """Input shape: Batch x Time x Channel"""
+
+ # if key_value_states are provided this layer is used as a cross-attention layer
+ # for the decoder
+ is_cross_attention = key_value_states is not None
+
+ bsz, tgt_len, _ = hidden_states.size()
+
+ # get query proj
+ query_states = self.q_proj(hidden_states) * self.scaling
+ # get key, value proj
+ if is_cross_attention and past_key_value is not None:
+ # reuse k,v, cross_attentions
+ key_states = past_key_value[0]
+ value_states = past_key_value[1]
+ elif is_cross_attention:
+ # cross_attentions
+ key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
+ value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
+ elif past_key_value is not None:
+ # reuse k, v, self_attention
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
+ key_states = torch.cat([past_key_value[0], key_states], dim=2)
+ value_states = torch.cat([past_key_value[1], value_states], dim=2)
+ else:
+ # self_attention
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
+
+ if self.is_decoder:
+ # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
+ # Further calls to cross_attention layer can then reuse all cross-attention
+ # key/value_states (first "if" case)
+ # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
+ past_key_value = (key_states, value_states)
+
+ proj_shape = (bsz * self.num_heads, -1, self.head_dim)
+ query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
+ key_states = key_states.view(*proj_shape)
+ value_states = value_states.view(*proj_shape)
+
+ src_len = key_states.size(1)
+ attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
+
+ if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
+ raise ValueError(
+ f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
+ f" {attn_weights.size()}"
+ )
+
+ if attention_mask is not None:
+ if attention_mask.size() != (bsz, 1, tgt_len, src_len):
+ raise ValueError(
+ f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
+ )
+ attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
+ attn_weights = torch.max(
+ attn_weights, torch.tensor(torch.finfo(attn_weights.dtype).min, device=attn_weights.device)
+ )
+ attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
+
+ # upcast to fp32 if the weights are in fp16. Please see https://github.com/huggingface/transformers/pull/17437
+ if attn_weights.dtype == torch.float16:
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(torch.float16)
+ else:
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1)
+
+ if layer_head_mask is not None:
+ if layer_head_mask.size() != (self.num_heads,):
+ raise ValueError(
+ f"Head mask for a single layer should be of size {(self.num_heads,)}, but is"
+ f" {layer_head_mask.size()}"
+ )
+ attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
+ attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
+
+ if output_attentions:
+ # this operation is a bit awkward, but it's required to
+ # make sure that attn_weights keeps its gradient.
+ # In order to do so, attn_weights have to be reshaped
+ # twice and have to be reused in the following
+ attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
+ attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
+ else:
+ attn_weights_reshaped = None
+
+ attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
+
+ attn_output = torch.bmm(attn_probs, value_states)
+
+ if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
+ raise ValueError(
+ f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is"
+ f" {attn_output.size()}"
+ )
+
+ attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
+ attn_output = attn_output.transpose(1, 2)
+
+ # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be
+ # partitioned across GPUs when using tensor-parallelism.
+ attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim)
+
+ attn_output = self.out_proj(attn_output)
+
+ return attn_output, attn_weights_reshaped, past_key_value
+
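+# Shape sketch for the eager attention above (hyper-parameters are illustrative):
+#
+#     >>> cfg = OPTConfig(hidden_size=16, num_attention_heads=4)
+#     >>> attn = OPTAttention(cfg, is_decoder=True)
+#     >>> out, weights, past = attn(torch.randn(2, 5, 16), output_attentions=True)
+#     >>> out.shape, weights.shape, past[0].shape
+#     (torch.Size([2, 5, 16]), torch.Size([2, 4, 5, 5]), torch.Size([2, 4, 5, 4]))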
+
+class OptFlashAttention2(OPTAttention):
+ """
+ OPT flash attention module. This module inherits from `OPTAttention`, as the weights of the module stay untouched.
+ The only required change would be on the forward pass where it needs to correctly call the public API of flash
+ attention and deal with padding tokens in case the input contains any of them.
+ """
+
+ # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2.__init__
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+
+ # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1.
+ # flash_attn<2.1 generates a top-left aligned causal mask, while what is needed here is bottom-right alignment, which was made the default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0.
+ # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left).
+ self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ key_value_states: Optional[torch.Tensor] = None,
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ layer_head_mask: Optional[torch.Tensor] = None,
+ output_attentions: bool = False,
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
+ """Input shape: Batch x Time x Channel"""
+
+ # if key_value_states are provided this layer is used as a cross-attention layer
+ # for the decoder
+ is_cross_attention = key_value_states is not None
+
+ bsz, _, _ = hidden_states.size()
+
+ # get query proj
+ query_states = self.q_proj(hidden_states)
+ # get key, value proj
+ if is_cross_attention and past_key_value is not None:
+ # reuse k,v, cross_attentions
+ key_states = past_key_value[0]
+ value_states = past_key_value[1]
+ elif is_cross_attention:
+ # cross_attentions
+ key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
+ value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
+ elif past_key_value is not None:
+ # reuse k, v, self_attention
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
+ key_states = torch.cat([past_key_value[0], key_states], dim=2)
+ value_states = torch.cat([past_key_value[1], value_states], dim=2)
+ else:
+ # self_attention
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
+
+ if self.is_decoder:
+ # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
+ # Further calls to cross_attention layer can then reuse all cross-attention
+ # key/value_states (first "if" case)
+ # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
+ past_key_value = (key_states, value_states)
+
+ query_length = query_states.shape[1]
+ tgt_len = key_states.shape[-2]
+
+ # Flash attention requires the input to have the shape
+ # batch_size x seq_length x num_heads x head_dim
+ query_states = query_states.view(bsz, query_length, self.num_heads, self.head_dim)
+ key_states = key_states.transpose(1, 2).view(bsz, tgt_len, self.num_heads, self.head_dim)
+ value_states = value_states.transpose(1, 2).view(bsz, tgt_len, self.num_heads, self.head_dim)
+
+ attn_dropout = self.dropout if self.training else 0.0
+
+ # In PEFT, the layer norms are usually cast to float32 for training stability,
+ # so the input hidden states may have been silently upcast to float32. Hence, we need
+ # to cast them back to the expected dtype just to be sure everything works as expected.
+ input_dtype = query_states.dtype
+ if input_dtype == torch.float32:
+ if torch.is_autocast_enabled():
+ target_dtype = torch.get_autocast_gpu_dtype()
+ # Handle the case where the model is quantized
+ elif hasattr(self.config, "_pre_quantization_dtype"):
+ target_dtype = self.config._pre_quantization_dtype
+ else:
+ target_dtype = self.q_proj.weight.dtype
+
+ logger.warning_once(
+ f"The input hidden states seems to be silently casted in float32, this might be related to"
+ f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in"
+ f" {target_dtype}."
+ )
+
+ query_states = query_states.to(target_dtype)
+ key_states = key_states.to(target_dtype)
+ value_states = value_states.to(target_dtype)
+
+ attn_output = self._flash_attention_forward(
+ query_states, key_states, value_states, attention_mask, query_length, dropout=attn_dropout
+ )
+
+ attn_weights_reshaped = attn_output.reshape(bsz, query_length, self.num_heads * self.head_dim)
+ attn_output = self.out_proj(attn_weights_reshaped)
+
+ if not output_attentions:
+ attn_weights_reshaped = None
+
+ return attn_output, attn_weights_reshaped, past_key_value
+
+ # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2._flash_attention_forward
+ def _flash_attention_forward(
+ self, query_states, key_states, value_states, attention_mask, query_length, dropout=0.0, softmax_scale=None
+ ):
+ """
+ Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token
+ first unpad the input, then computes the attention scores and pad the final attention scores.
+
+ Args:
+ query_states (`torch.Tensor`):
+ Input query states to be passed to Flash Attention API
+ key_states (`torch.Tensor`):
+ Input key states to be passed to Flash Attention API
+ value_states (`torch.Tensor`):
+ Input value states to be passed to Flash Attention API
+ attention_mask (`torch.Tensor`):
+ The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the
+ position of padding tokens and 1 for the position of non-padding tokens.
+ dropout (`float`):
+ Attention dropout
+ softmax_scale (`float`, *optional*):
+ The scaling of QK^T before applying softmax. Defaults to 1 / sqrt(head_dim).
+ """
+ if not self._flash_attn_uses_top_left_mask:
+ causal = self.is_causal
+ else:
+ # TODO: Remove the `query_length != 1` check once Flash Attention for RoCm is bumped to 2.1. For details, please see the comment in LlamaFlashAttention2 __init__.
+ causal = self.is_causal and query_length != 1
+
+ # A padding mask was provided, so the batch may contain padding tokens: unpad before calling the varlen kernel
+ if attention_mask is not None:
+ batch_size = query_states.shape[0]
+ query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input(
+ query_states, key_states, value_states, attention_mask, query_length
+ )
+
+ cu_seqlens_q, cu_seqlens_k = cu_seq_lens
+ max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens
+
+ attn_output_unpad = flash_attn_varlen_func(
+ query_states,
+ key_states,
+ value_states,
+ cu_seqlens_q=cu_seqlens_q,
+ cu_seqlens_k=cu_seqlens_k,
+ max_seqlen_q=max_seqlen_in_batch_q,
+ max_seqlen_k=max_seqlen_in_batch_k,
+ dropout_p=dropout,
+ softmax_scale=softmax_scale,
+ causal=causal,
+ )
+
+ attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length)
+ else:
+ attn_output = flash_attn_func(
+ query_states, key_states, value_states, dropout, softmax_scale=softmax_scale, causal=causal
+ )
+
+ return attn_output
+
+ # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2._upad_input
+ def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length):
+ indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask)
+ batch_size, kv_seq_len, num_key_value_heads, head_dim = key_layer.shape
+
+ key_layer = index_first_axis(
+ key_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
+ )
+ value_layer = index_first_axis(
+ value_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
+ )
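+ # Three cases follow: (1) prefill, where query and key/value lengths match; (2) single-token decoding,
+ # where each sequence contributes exactly one query; (3) a query block shorter than the key/value length,
+ # which assumes left padding.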
+ if query_length == kv_seq_len:
+ query_layer = index_first_axis(
+ query_layer.reshape(batch_size * kv_seq_len, self.num_heads, head_dim), indices_k
+ )
+ cu_seqlens_q = cu_seqlens_k
+ max_seqlen_in_batch_q = max_seqlen_in_batch_k
+ indices_q = indices_k
+ elif query_length == 1:
+ max_seqlen_in_batch_q = 1
+ cu_seqlens_q = torch.arange(
+ batch_size + 1, dtype=torch.int32, device=query_layer.device
+ ) # There is a memcpy here, that is very bad.
+ indices_q = cu_seqlens_q[:-1]
+ query_layer = query_layer.squeeze(1)
+ else:
+ # The -q_len: slice assumes left padding.
+ attention_mask = attention_mask[:, -query_length:]
+ query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask)
+
+ return (
+ query_layer,
+ key_layer,
+ value_layer,
+ indices_q,
+ (cu_seqlens_q, cu_seqlens_k),
+ (max_seqlen_in_batch_q, max_seqlen_in_batch_k),
+ )
+
+
+OPT_ATTENTION_CLASSES = {
+ "eager": OPTAttention,
+ "flash_attention_2": OptFlashAttention2,
+}
+
+
+class OPTDecoderLayer(nn.Module):
+ def __init__(self, config: OPTConfig):
+ super().__init__()
+ self.embed_dim = config.hidden_size
+
+ self.self_attn = OPT_ATTENTION_CLASSES[config._attn_implementation](config=config, is_decoder=True)
+
+ self.do_layer_norm_before = config.do_layer_norm_before
+ self.dropout = config.dropout
+ self.activation_fn = ACT2FN[config.activation_function]
+
+ self.self_attn_layer_norm = nn.LayerNorm(
+ self.embed_dim, elementwise_affine=config.layer_norm_elementwise_affine
+ )
+ self.fc1 = nn.Linear(self.embed_dim, config.ffn_dim, bias=config.enable_bias)
+ self.fc2 = nn.Linear(config.ffn_dim, self.embed_dim, bias=config.enable_bias)
+ self.final_layer_norm = nn.LayerNorm(self.embed_dim, elementwise_affine=config.layer_norm_elementwise_affine)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.Tensor] = None,
+ layer_head_mask: Optional[torch.Tensor] = None,
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
+ output_attentions: Optional[bool] = False,
+ use_cache: Optional[bool] = False,
+ ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
+ """
+ Args:
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
+ attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
+ layer_head_mask (`torch.FloatTensor`, *optional*): mask for attention heads in a given layer of size
+ `(encoder_attention_heads,)`.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ use_cache (`bool`, *optional*):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
+ (see `past_key_values`).
+ past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
+ """
+
+ residual = hidden_states
+
+ # 125m, 1.7B, ..., 175B applies layer norm BEFORE attention
+ if self.do_layer_norm_before:
+ hidden_states = self.self_attn_layer_norm(hidden_states)
+
+ # Self Attention
+ hidden_states, self_attn_weights, present_key_value = self.self_attn(
+ hidden_states=hidden_states,
+ past_key_value=past_key_value,
+ attention_mask=attention_mask,
+ layer_head_mask=layer_head_mask,
+ output_attentions=output_attentions,
+ )
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+ hidden_states = residual + hidden_states
+
+ # 350m applies layer norm AFTER attention
+ if not self.do_layer_norm_before:
+ hidden_states = self.self_attn_layer_norm(hidden_states)
+
+ # Fully Connected
+ hidden_states_shape = hidden_states.shape
+ hidden_states = hidden_states.reshape(-1, hidden_states.size(-1))
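+ # Flatten (batch, seq_len, hidden) to (batch * seq_len, hidden) for the feed-forward block; the original
+ # shape is restored after the residual connection below.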
+ residual = hidden_states
+
+ # 125m, 1.7B, ..., 175B apply layer norm BEFORE the feed-forward block
+ if self.do_layer_norm_before:
+ hidden_states = self.final_layer_norm(hidden_states)
+
+ hidden_states = self.fc1(hidden_states)
+ hidden_states = self.activation_fn(hidden_states)
+
+ hidden_states = self.fc2(hidden_states)
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+
+ hidden_states = (residual + hidden_states).view(hidden_states_shape)
+
+ # 350m applies layer norm AFTER the feed-forward block
+ if not self.do_layer_norm_before:
+ hidden_states = self.final_layer_norm(hidden_states)
+
+ outputs = (hidden_states,)
+
+ if output_attentions:
+ outputs += (self_attn_weights,)
+
+ if use_cache:
+ outputs += (present_key_value,)
+
+ return outputs
+
+
+OPT_START_DOCSTRING = r"""
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads,
+ etc.)
+
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
+ and behavior.
+
+ Parameters:
+ config ([`OPTConfig`]):
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
+ load the weights associated with the model, only the configuration. Check out the
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+
+@add_start_docstrings(
+ "The bare OPT Model outputting raw hidden-states without any specific head on top.",
+ OPT_START_DOCSTRING,
+)
+class OPTPreTrainedModel(PreTrainedModel):
+ config_class = OPTConfig
+ base_model_prefix = "model"
+ supports_gradient_checkpointing = True
+ _no_split_modules = ["OPTDecoderLayer"]
+ _supports_flash_attn_2 = True
+
+ def _init_weights(self, module):
+ std = self.config.init_std
+ if isinstance(module, nn.Linear):
+ module.weight.data.normal_(mean=0.0, std=std)
+ if module.bias is not None:
+ module.bias.data.zero_()
+ elif isinstance(module, nn.Embedding):
+ module.weight.data.normal_(mean=0.0, std=std)
+ if module.padding_idx is not None:
+ module.weight.data[module.padding_idx].zero_()
+
+
+OPT_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
+ it.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
+ `past_key_values`).
+
+ If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
+ and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
+ information on the default strategy.
+ head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)` and 2 additional tensors of shape
+ `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
+
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
+ blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
+
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
+ model's internal embedding lookup matrix.
+ use_cache (`bool`, *optional*):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
+ `past_key_values`).
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+class OPTDecoder(OPTPreTrainedModel):
+ """
+ Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`OPTDecoderLayer`]
+
+ Args:
+ config: OPTConfig
+ """
+
+ def __init__(self, config: OPTConfig):
+ super().__init__(config)
+ self.dropout = config.dropout
+ self.layerdrop = config.layerdrop
+ self.padding_idx = config.pad_token_id
+ self.max_target_positions = config.max_position_embeddings
+ self.vocab_size = config.vocab_size
+
+ self.embed_tokens = nn.Embedding(config.vocab_size, config.word_embed_proj_dim, self.padding_idx)
+ self.embed_positions = OPTLearnedPositionalEmbedding(config.max_position_embeddings, config.hidden_size)
+
+ if config.word_embed_proj_dim != config.hidden_size:
+ self.project_out = nn.Linear(config.hidden_size, config.word_embed_proj_dim, bias=False)
+ else:
+ self.project_out = None
+
+ if config.word_embed_proj_dim != config.hidden_size:
+ self.project_in = nn.Linear(config.word_embed_proj_dim, config.hidden_size, bias=False)
+ else:
+ self.project_in = None
+
+ # Note that the only purpose of `config._remove_final_layer_norm` is to keep backward compatibility
+ # with checkpoints that have been fine-tuned before transformers v4.20.1
+ # see https://github.com/facebookresearch/metaseq/pull/164
+ if config.do_layer_norm_before and not config._remove_final_layer_norm:
+ self.final_layer_norm = nn.LayerNorm(
+ config.hidden_size, elementwise_affine=config.layer_norm_elementwise_affine
+ )
+ else:
+ self.final_layer_norm = None
+
+ self.layers = nn.ModuleList([OPTDecoderLayer(config) for _ in range(config.num_hidden_layers)])
+ self._use_flash_attention_2 = config._attn_implementation == "flash_attention_2"
+
+ self.gradient_checkpointing = False
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.embed_tokens
+
+ def set_input_embeddings(self, value):
+ self.embed_tokens = value
+
+ def forward(
+ self,
+ input_ids: torch.LongTensor = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, BaseModelOutputWithPast]:
+ r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
+ provide it.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ head_mask (`torch.Tensor` of shape `(num_hidden_layers, num_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
+ shape `(batch_size, num_heads, sequence_length, embed_size_per_head)` and 2 additional tensors of
+ shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
+
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and in the
+ cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
+
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
+ that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
+ all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
+ This is useful if you want more control over how to convert `input_ids` indices into associated vectors
+ than the model's internal embedding lookup matrix.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
+ for more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+ """
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ # retrieve input_ids and inputs_embeds
+ if input_ids is not None and inputs_embeds is not None:
+ raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
+ elif input_ids is not None:
+ input_shape = input_ids.size()
+ input_ids = input_ids.view(-1, input_shape[-1])
+ elif inputs_embeds is not None:
+ input_shape = inputs_embeds.size()[:-1]
+ else:
+ raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
+
+ if inputs_embeds is None:
+ inputs_embeds = self.embed_tokens(input_ids)
+
+ batch_size, seq_length = input_shape
+ past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
+ # required mask seq length can be calculated via length of past
+ mask_seq_length = past_key_values_length + seq_length
+
+ # embed positions
+ if self._use_flash_attention_2:
+ # 2d mask is passed through the layers
+ causal_attention_mask = attention_mask if (attention_mask is not None and 0 in attention_mask) else None
+ attention_mask = (
+ torch.ones(batch_size, mask_seq_length, device=inputs_embeds.device)
+ if attention_mask is None
+ else attention_mask
+ )
+ else:
+ # 4d mask is passed through the layers
+ if attention_mask is None:
+ attention_mask = torch.ones(batch_size, mask_seq_length, device=inputs_embeds.device)
+ elif attention_mask.shape[1] != mask_seq_length:
+ raise ValueError(
+ f"The provided attention mask has length {attention_mask.shape[1]}, but its length should be "
+ f"{mask_seq_length} (sum of the lengths of current and past inputs)"
+ )
+ causal_attention_mask = _prepare_4d_causal_attention_mask(
+ attention_mask, input_shape, inputs_embeds, past_key_values_length
+ )
+
+ pos_embeds = self.embed_positions(attention_mask, past_key_values_length)
+
+ if self.project_in is not None:
+ inputs_embeds = self.project_in(inputs_embeds)
+
+ hidden_states = inputs_embeds + pos_embeds
+
+ if self.gradient_checkpointing and self.training:
+ if use_cache:
+ logger.warning_once(
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
+ )
+ use_cache = False
+
+ # decoder layers
+ all_hidden_states = () if output_hidden_states else None
+ all_self_attns = () if output_attentions else None
+ next_decoder_cache = () if use_cache else None
+
+ # check if head_mask has a correct number of layers specified if desired
+ for attn_mask, mask_name in zip([head_mask], ["head_mask"]):
+ if attn_mask is not None:
+ if attn_mask.size()[0] != (len(self.layers)):
+ raise ValueError(
+ f"The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for"
+ f" {head_mask.size()[0]}."
+ )
+
+ for idx, decoder_layer in enumerate(self.layers):
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
+ if output_hidden_states:
+ all_hidden_states += (hidden_states,)
+
+ if self.training:
+ dropout_probability = torch.rand([])
+ if dropout_probability < self.layerdrop:
+ continue
+
+ past_key_value = past_key_values[idx] if past_key_values is not None else None
+
+ if self.gradient_checkpointing and self.training:
+ layer_outputs = self._gradient_checkpointing_func(
+ decoder_layer.__call__,
+ hidden_states,
+ causal_attention_mask,
+ head_mask[idx] if head_mask is not None else None,
+ None,
+ output_attentions,
+ use_cache,
+ )
+ else:
+ layer_outputs = decoder_layer(
+ hidden_states,
+ attention_mask=causal_attention_mask,
+ layer_head_mask=(head_mask[idx] if head_mask is not None else None),
+ past_key_value=past_key_value,
+ output_attentions=output_attentions,
+ use_cache=use_cache,
+ )
+
+ hidden_states = layer_outputs[0]
+
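+ # The present key/value cache sits at index 1 of layer_outputs, or at index 2 when attention weights are also returned.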
+ if use_cache:
+ next_decoder_cache += (layer_outputs[2 if output_attentions else 1],)
+
+ if output_attentions:
+ all_self_attns += (layer_outputs[1],)
+
+ if self.final_layer_norm is not None:
+ hidden_states = self.final_layer_norm(hidden_states)
+
+ if self.project_out is not None:
+ hidden_states = self.project_out(hidden_states)
+
+ # add hidden states from the last decoder layer
+ if output_hidden_states:
+ all_hidden_states += (hidden_states,)
+
+ next_cache = next_decoder_cache if use_cache else None
+ if not return_dict:
+ return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)
+ return BaseModelOutputWithPast(
+ last_hidden_state=hidden_states,
+ past_key_values=next_cache,
+ hidden_states=all_hidden_states,
+ attentions=all_self_attns,
+ )
+
+
+@add_start_docstrings(
+ "The bare OPT Model outputting raw hidden-states without any specific head on top.",
+ OPT_START_DOCSTRING,
+)
+class OPTModel(OPTPreTrainedModel):
+ def __init__(self, config: OPTConfig):
+ super().__init__(config)
+ self.decoder = OPTDecoder(config)
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.decoder.embed_tokens
+
+ def set_input_embeddings(self, value):
+ self.decoder.embed_tokens = value
+
+ def get_decoder(self):
+ return self.decoder
+
+ @add_start_docstrings_to_model_forward(OPT_INPUTS_DOCSTRING)
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=BaseModelOutputWithPast,
+ config_class=_CONFIG_FOR_DOC,
+ expected_output=_EXPECTED_OUTPUT_SHAPE,
+ )
+ def forward(
+ self,
+ input_ids: torch.LongTensor = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, BaseModelOutputWithPast]:
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ # decoder outputs consists of (dec_features, past_key_value, dec_hidden, dec_attn)
+ decoder_outputs = self.decoder(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ head_mask=head_mask,
+ past_key_values=past_key_values,
+ inputs_embeds=inputs_embeds,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ if not return_dict:
+ return decoder_outputs
+
+ return BaseModelOutputWithPast(
+ last_hidden_state=decoder_outputs.last_hidden_state,
+ past_key_values=decoder_outputs.past_key_values,
+ hidden_states=decoder_outputs.hidden_states,
+ attentions=decoder_outputs.attentions,
+ )
+
+
+class OPTForCausalLM(OPTPreTrainedModel):
+ _tied_weights_keys = ["lm_head.weight"]
+
+ def __init__(self, config):
+ super().__init__(config)
+ self.model = OPTModel(config)
+
+ # the lm_head weight is automatically tied to the embed tokens weight
+ self.lm_head = nn.Linear(config.word_embed_proj_dim, config.vocab_size, bias=False)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.model.decoder.embed_tokens
+
+ def set_input_embeddings(self, value):
+ self.model.decoder.embed_tokens = value
+
+ def get_output_embeddings(self):
+ return self.lm_head
+
+ def set_output_embeddings(self, new_embeddings):
+ self.lm_head = new_embeddings
+
+ def set_decoder(self, decoder):
+ self.model.decoder = decoder
+
+ def get_decoder(self):
+ return self.model.decoder
+
+ @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_ids: torch.LongTensor = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, CausalLMOutputWithPast]:
+ r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
+ provide it.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ head_mask (`torch.Tensor` of shape `(num_hidden_layers, num_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
+ shape `(batch_size, num_heads, sequence_length, embed_size_per_head)` and 2 additional tensors of
+ shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. The two additional
+ tensors are only required when the model is used as a decoder in a Sequence to Sequence model.
+
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and in the
+ cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
+
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
+ that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
+ all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
+ This is useful if you want more control over how to convert `input_ids` indices into associated vectors
+ than the model's internal embedding lookup matrix.
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the language modeling loss. Indices should either be in `[0, ...,
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
+ use_cache (`bool`, *optional*):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
+ (see `past_key_values`).
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
+ for more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+
+ Returns:
+
+ Example:
+
+ ```python
+ >>> from transformers import AutoTokenizer, OPTForCausalLM
+
+ >>> model = OPTForCausalLM.from_pretrained("facebook/opt-350m")
+ >>> tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m")
+
+ >>> prompt = "Hey, are you conscious? Can you talk to me?"
+ >>> inputs = tokenizer(prompt, return_tensors="pt")
+
+ >>> # Generate
+ >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
+ >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
+ "Hey, are you conscious? Can you talk to me?\nI'm not conscious. I'm just a little bit of a weirdo."
+ ```"""
+
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
+ outputs = self.model.decoder(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ head_mask=head_mask,
+ past_key_values=past_key_values,
+ inputs_embeds=inputs_embeds,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ logits = self.lm_head(outputs[0]).contiguous()
+
+ loss = None
+ if labels is not None:
+ # move labels to correct device to enable model parallelism
+ labels = labels.to(logits.device)
+ # Shift so that tokens < n predict n
+ shift_logits = logits[..., :-1, :].contiguous()
+ shift_labels = labels[..., 1:].contiguous()
+ # Flatten the tokens
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(shift_logits.view(-1, self.config.vocab_size), shift_labels.view(-1))
+
+ if not return_dict:
+ output = (logits,) + outputs[1:]
+ return (loss,) + output if loss is not None else output
+
+ return CausalLMOutputWithPast(
+ loss=loss,
+ logits=logits,
+ past_key_values=outputs.past_key_values,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+ def prepare_inputs_for_generation(
+ self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs
+ ):
+ if past_key_values is not None:
+ past_length = past_key_values[0][0].shape[2]
+
+ # Some generation methods already pass only the last input ID
+ if input_ids.shape[1] > past_length:
+ remove_prefix_length = past_length
+ else:
+ # Default to old behavior: keep only final ID
+ remove_prefix_length = input_ids.shape[1] - 1
+
+ input_ids = input_ids[:, remove_prefix_length:]
+
+ # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
+ if inputs_embeds is not None and past_key_values is None:
+ model_inputs = {"inputs_embeds": inputs_embeds}
+ else:
+ model_inputs = {"input_ids": input_ids}
+
+ model_inputs.update(
+ {
+ "past_key_values": past_key_values,
+ "use_cache": kwargs.get("use_cache"),
+ "attention_mask": attention_mask,
+ }
+ )
+ return model_inputs
+
+ @staticmethod
+ def _reorder_cache(past_key_values, beam_idx):
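+ # Reorder each layer's cached key/value states so they follow the beam indices selected during beam search.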
+ reordered_past = ()
+ for layer_past in past_key_values:
+ reordered_past += (
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
+ )
+ return reordered_past
+
+
+@add_start_docstrings(
+ """
+ The OPT Model transformer with a sequence classification head on top (linear layer).
+
+ [`OPTForSequenceClassification`] uses the last token in order to do the classification, as other causal models
+ (e.g. GPT-2) do.
+
+ Since it does classification on the last token, it needs to know the position of the last token. If a
+ `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
+ no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
+ padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (takes the last value in
+ each row of the batch).
+ """,
+ OPT_START_DOCSTRING,
+)
+class OPTForSequenceClassification(OPTPreTrainedModel):
+ def __init__(self, config: OPTConfig):
+ super().__init__(config)
+ self.num_labels = config.num_labels
+ self.model = OPTModel(config)
+ self.score = nn.Linear(config.word_embed_proj_dim, self.num_labels, bias=False)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(OPT_INPUTS_DOCSTRING)
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_SEQUENCE_CLASSIFICATION,
+ output_type=SequenceClassifierOutputWithPast,
+ config_class=_CONFIG_FOR_DOC,
+ expected_output=_SEQ_CLASS_EXPECTED_OUTPUT,
+ expected_loss=_SEQ_CLASS_EXPECTED_LOSS,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, SequenceClassifierOutputWithPast]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
+ config.num_labels - 1]`. If `config.num_labels == 1`, a regression loss is computed (Mean-Square loss); if
+ `config.num_labels > 1`, a classification loss is computed (Cross-Entropy).
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ transformer_outputs = self.model(
+ input_ids,
+ past_key_values=past_key_values,
+ attention_mask=attention_mask,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ hidden_states = transformer_outputs[0]
+ logits = self.score(hidden_states)
+
+ if input_ids is not None:
+ batch_size, sequence_length = input_ids.shape[:2]
+ else:
+ batch_size, sequence_length = inputs_embeds.shape[:2]
+
+ if self.config.pad_token_id is None:
+ sequence_lengths = -1
+ else:
+ if input_ids is not None:
+ # if no pad token found, use modulo instead of reverse indexing for ONNX compatibility
+ sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1
+ sequence_lengths = sequence_lengths % input_ids.shape[-1]
+ sequence_lengths = sequence_lengths.to(logits.device)
+ else:
+ sequence_lengths = -1
+ logger.warning(
+ f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be "
+ "unexpected if using padding tokens in conjunction with `inputs_embeds.`"
+ )
+
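+ # Select, for each sequence, the logits at the last non-padding position (or simply the last position when no pad token id is set).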
+ pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths]
+
+ loss = None
+ if labels is not None:
+ if self.config.problem_type is None:
+ if self.num_labels == 1:
+ self.config.problem_type = "regression"
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
+ self.config.problem_type = "single_label_classification"
+ else:
+ self.config.problem_type = "multi_label_classification"
+
+ if self.config.problem_type == "regression":
+ loss_fct = MSELoss()
+ if self.num_labels == 1:
+ loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
+ else:
+ loss = loss_fct(pooled_logits, labels)
+ elif self.config.problem_type == "single_label_classification":
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
+ elif self.config.problem_type == "multi_label_classification":
+ loss_fct = BCEWithLogitsLoss()
+ loss = loss_fct(pooled_logits, labels)
+ if not return_dict:
+ output = (pooled_logits,) + transformer_outputs[1:]
+ return ((loss,) + output) if loss is not None else output
+
+ return SequenceClassifierOutputWithPast(
+ loss=loss,
+ logits=pooled_logits,
+ past_key_values=transformer_outputs.past_key_values,
+ hidden_states=transformer_outputs.hidden_states,
+ attentions=transformer_outputs.attentions,
+ )
+
+ def get_input_embeddings(self):
+ return self.model.decoder.embed_tokens
+
+ def set_input_embeddings(self, value):
+ self.model.decoder.embed_tokens = value
+
+
+@add_start_docstrings(
+ """
+ The OPT Model transformer with a span classification head on top for extractive question-answering tasks like SQuAD
+ (a linear layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
+ """,
+ OPT_START_DOCSTRING,
+)
+class OPTForQuestionAnswering(OPTPreTrainedModel):
+ def __init__(self, config: OPTConfig):
+ super().__init__(config)
+ self.model = OPTModel(config)
+ self.qa_outputs = nn.Linear(config.word_embed_proj_dim, 2)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(OPT_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=QuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ start_positions: Optional[torch.LongTensor] = None,
+ end_positions: Optional[torch.LongTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, QuestionAnsweringModelOutput]:
+ r"""
+ start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
+ are not taken into account for computing the loss.
+ end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
+ are not taken into account for computing the loss.
+
+ Returns:
+
+ Example:
+
+ ```python
+ >>> from transformers import AutoTokenizer, OPTForQuestionAnswering
+ >>> import torch
+
+ >>> torch.manual_seed(4) # doctest: +IGNORE_RESULT
+ >>> tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m")
+
+ >>> # note: we are loading a OPTForQuestionAnswering from the hub here,
+ >>> # so the head will be randomly initialized, hence the predictions will be random
+ >>> model = OPTForQuestionAnswering.from_pretrained("facebook/opt-350m")
+
+ >>> question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet"
+
+ >>> inputs = tokenizer(question, text, return_tensors="pt")
+ >>> with torch.no_grad():
+ ... outputs = model(**inputs)
+
+ >>> answer_start_index = outputs.start_logits.argmax()
+ >>> answer_end_index = outputs.end_logits.argmax()
+
+ >>> answer_offset = len(tokenizer(question)[0])
+
+ >>> predict_answer_tokens = inputs.input_ids[
+ ... 0, answer_offset + answer_start_index : answer_offset + answer_end_index + 1
+ ... ]
+ >>> predicted = tokenizer.decode(predict_answer_tokens)
+ >>> predicted
+ ' a nice puppet'
+ ```"""
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ transformer_outputs = self.model(
+ input_ids,
+ past_key_values=past_key_values,
+ attention_mask=attention_mask,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ hidden_states = transformer_outputs[0]
+
+ logits = self.qa_outputs(hidden_states)
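+ # qa_outputs projects each hidden state to two scores; splitting along the last dimension yields span-start and span-end logits.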
+ start_logits, end_logits = logits.split(1, dim=-1)
+ start_logits = start_logits.squeeze(-1).contiguous()
+ end_logits = end_logits.squeeze(-1).contiguous()
+
+ total_loss = None
+ if start_positions is not None and end_positions is not None:
+ # If we are on multi-GPU, split adds a dimension
+ if len(start_positions.size()) > 1:
+ start_positions = start_positions.squeeze(-1)
+ if len(end_positions.size()) > 1:
+ end_positions = end_positions.squeeze(-1)
+ # sometimes the start/end positions are outside our model inputs, so we ignore these terms
+ ignored_index = start_logits.size(1)
+ start_positions = start_positions.clamp(0, ignored_index)
+ end_positions = end_positions.clamp(0, ignored_index)
+
+ loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
+ start_loss = loss_fct(start_logits, start_positions)
+ end_loss = loss_fct(end_logits, end_positions)
+ total_loss = (start_loss + end_loss) / 2
+
+ if not return_dict:
+ output = (start_logits, end_logits) + transformer_outputs[2:]
+ return ((total_loss,) + output) if total_loss is not None else output
+
+ return QuestionAnsweringModelOutput(
+ loss=total_loss,
+ start_logits=start_logits,
+ end_logits=end_logits,
+ hidden_states=transformer_outputs.hidden_states,
+ attentions=transformer_outputs.attentions,
+ )
+
+ def get_input_embeddings(self):
+ return self.model.decoder.embed_tokens
+
+ def set_input_embeddings(self, value):
+ self.model.decoder.embed_tokens = value
diff --git a/venv/lib/python3.10/site-packages/transformers/models/opt/modeling_tf_opt.py b/venv/lib/python3.10/site-packages/transformers/models/opt/modeling_tf_opt.py
new file mode 100644
index 0000000000000000000000000000000000000000..8dbad97e08b651dbf44a071eb16965f1c0047f16
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/opt/modeling_tf_opt.py
@@ -0,0 +1,1095 @@
+# coding=utf-8
+# Copyright 2022 The Fairseq Authors and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" TF 2.0 OPT model."""
+
+
+from __future__ import annotations
+
+from typing import Optional, Tuple, Union
+
+import numpy as np
+import tensorflow as tf
+
+from ...activations_tf import get_tf_activation
+from ...modeling_tf_outputs import TFBaseModelOutputWithPast, TFCausalLMOutputWithPast
+
+# Public API
+from ...modeling_tf_utils import (
+ TFCausalLanguageModelingLoss,
+ TFModelInputType,
+ TFPreTrainedModel,
+ TFSharedEmbeddings,
+ keras,
+ keras_serializable,
+ unpack_inputs,
+)
+from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax
+from ...utils import (
+ add_code_sample_docstrings,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ logging,
+ replace_return_docstrings,
+)
+from .configuration_opt import OPTConfig
+
+
+logger = logging.get_logger(__name__)
+
+_CHECKPOINT_FOR_DOC = "facebook/opt-350m"
+_CONFIG_FOR_DOC = "OPTConfig"
+
+# Base model docstring
+_EXPECTED_OUTPUT_SHAPE = [1, 8, 1024]
+
+# Causal LM output
+_CAUSAL_LM_EXPECTED_OUTPUT = (
+ "Hey, are you conscious? Can you talk to me?\nI'm not conscious. I'm just a little bit of a weirdo."
+)
+
+LARGE_NEGATIVE = -1e8
+
+
+def _make_causal_mask(input_ids_shape: tf.TensorShape, past_key_values_length: int = 0):
+ """
+ Make causal mask used for uni-directional (decoder) self-attention.
+ """
+ bsz = input_ids_shape[0]
+ tgt_len = input_ids_shape[1]
+ # We need triu with k = 1 but TF expects known compile-time dims for that, so we hack around it
+ mask = tf.fill((tgt_len, tgt_len), tf.cast(LARGE_NEGATIVE, tf.float32))
+ mask = tf.linalg.band_part(mask, 0, -1) - tf.linalg.band_part(mask, 0, 0)
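+ # band_part(mask, 0, -1) keeps the diagonal and everything above it; subtracting band_part(mask, 0, 0) zeroes the
+ # diagonal, leaving LARGE_NEGATIVE strictly above the diagonal so that future positions are masked out.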
+
+ if past_key_values_length > 0:
+ mask = tf.concat([tf.zeros((tgt_len, past_key_values_length)), mask], axis=-1)
+
+ return tf.tile(mask[None, None, :, :], (bsz, 1, 1, 1))
+
+
+# Copied from transformers.models.bart.modeling_tf_bart._expand_mask
+def _expand_mask(mask: tf.Tensor, tgt_len: Optional[int] = None):
+ """
+ Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
+ """
+ src_len = shape_list(mask)[1]
+ tgt_len = tgt_len if tgt_len is not None else src_len
+ one_cst = tf.constant(1.0)
+ mask = tf.cast(mask, dtype=one_cst.dtype)
+ expanded_mask = tf.tile(mask[:, None, None, :], (1, 1, tgt_len, 1))
+
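+ # Inverting the mask turns attended positions (1) into 0 and padding positions (0) into a large negative bias that is added to the attention scores.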
+ return (one_cst - expanded_mask) * LARGE_NEGATIVE
+
+
+class TFOPTLearnedPositionalEmbedding(keras.layers.Embedding):
+ """
+ This module learns positional embeddings up to a fixed maximum size.
+ """
+
+ def __init__(self, num_embeddings: int, embedding_dim: int, **kwargs):
+ # OPT is set up so that if padding_idx is specified then offset the embedding ids by 2
+ # and adjust num_embeddings appropriately. Other models don't have this hack
+ self.offset = 2
+ super().__init__(num_embeddings + self.offset, embedding_dim, **kwargs)
+
+ def call(self, attention_mask, past_key_values_length: int = 0):
+ """`input_ids_shape` is expected to be [bsz x seqlen]."""
+ attention_mask = tf.cast(attention_mask, tf.int64)
+
+ # create positions depending on attention_mask
+ positions = tf.math.cumsum(attention_mask, axis=1) * attention_mask - 1
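+ # The cumulative sum over the mask gives each real token its 0-based position among non-padding tokens;
+ # padding positions become -1 and are shifted by self.offset below.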
+
+ # cut positions if `past_key_values_length` is > 0
+ positions = positions[:, past_key_values_length:]
+
+ return super().call(positions + self.offset)
+
+
+# Copied from transformers.models.bart.modeling_tf_bart.TFBartAttention with Bart->OPT
+class TFOPTAttention(keras.layers.Layer):
+ """Multi-headed attention from "Attention Is All You Need"""
+
+ def __init__(
+ self,
+ embed_dim: int,
+ num_heads: int,
+ dropout: float = 0.0,
+ is_decoder: bool = False,
+ bias: bool = True,
+ **kwargs,
+ ):
+ super().__init__(**kwargs)
+ self.embed_dim = embed_dim
+
+ self.num_heads = num_heads
+ self.dropout = keras.layers.Dropout(dropout)
+ self.head_dim = embed_dim // num_heads
+ if (self.head_dim * num_heads) != self.embed_dim:
+ raise ValueError(
+ f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
+ f" and `num_heads`: {num_heads})."
+ )
+ self.scaling = self.head_dim**-0.5
+ self.is_decoder = is_decoder
+
+ self.k_proj = keras.layers.Dense(embed_dim, use_bias=bias, name="k_proj")
+ self.q_proj = keras.layers.Dense(embed_dim, use_bias=bias, name="q_proj")
+ self.v_proj = keras.layers.Dense(embed_dim, use_bias=bias, name="v_proj")
+ self.out_proj = keras.layers.Dense(embed_dim, use_bias=bias, name="out_proj")
+
+ def _shape(self, tensor: tf.Tensor, seq_len: int, bsz: int):
+ return tf.transpose(tf.reshape(tensor, (bsz, seq_len, self.num_heads, self.head_dim)), (0, 2, 1, 3))
+
+ def call(
+ self,
+ hidden_states: tf.Tensor,
+ key_value_states: tf.Tensor | None = None,
+ past_key_value: Tuple[Tuple[tf.Tensor]] | None = None,
+ attention_mask: tf.Tensor | None = None,
+ layer_head_mask: tf.Tensor | None = None,
+ training: Optional[bool] = False,
+ ) -> Tuple[tf.Tensor, tf.Tensor | None]:
+ """Input shape: Batch x Time x Channel"""
+
+ # if key_value_states are provided this layer is used as a cross-attention layer
+ # for the decoder
+ is_cross_attention = key_value_states is not None
+ bsz, tgt_len, embed_dim = shape_list(hidden_states)
+
+ # get query proj
+ query_states = self.q_proj(hidden_states) * self.scaling
+ # get key, value proj
+ if is_cross_attention and past_key_value is not None:
+ # reuse k,v, cross_attentions
+ key_states = past_key_value[0]
+ value_states = past_key_value[1]
+ elif is_cross_attention:
+ # cross_attentions
+ key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
+ value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
+ elif past_key_value is not None:
+ # reuse k, v, self_attention
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
+ key_states = tf.concat([past_key_value[0], key_states], axis=2)
+ value_states = tf.concat([past_key_value[1], value_states], axis=2)
+ else:
+ # self_attention
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
+
+ if self.is_decoder:
+ # if cross_attention save Tuple(tf.Tensor, tf.Tensor) of all cross attention key/value_states.
+ # Further calls to cross_attention layer can then reuse all cross-attention
+ # key/value_states (first "if" case)
+ # if uni-directional self-attention (decoder) save Tuple(tf.Tensor, tf.Tensor) of
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
+ past_key_value = (key_states, value_states)
+
+ proj_shape = (bsz * self.num_heads, -1, self.head_dim)
+ query_states = tf.reshape(self._shape(query_states, tgt_len, bsz), proj_shape)
+ key_states = tf.reshape(key_states, proj_shape)
+ value_states = tf.reshape(value_states, proj_shape)
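+ # Heads are folded into the batch dimension so that attention for all heads can be computed with a single batched matmul.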
+
+ src_len = shape_list(key_states)[1]
+ attn_weights = tf.matmul(query_states, key_states, transpose_b=True)
+
+ tf.debugging.assert_equal(
+ shape_list(attn_weights),
+ [bsz * self.num_heads, tgt_len, src_len],
+ message=(
+ f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
+ f" {shape_list(attn_weights)}"
+ ),
+ )
+
+ if attention_mask is not None:
+ tf.debugging.assert_equal(
+ shape_list(attention_mask),
+ [bsz, 1, tgt_len, src_len],
+ message=(
+ f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is"
+ f" {shape_list(attention_mask)}"
+ ),
+ )
+
+ attention_mask = tf.cast(attention_mask, dtype=attn_weights.dtype)
+ attn_weights = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len)) + attention_mask
+ attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len))
+
+ attn_weights = stable_softmax(attn_weights, axis=-1)
+
+ if layer_head_mask is not None:
+ tf.debugging.assert_equal(
+ shape_list(layer_head_mask),
+ [self.num_heads],
+ message=(
+ f"Head mask for a single layer should be of size {(self.num_heads)}, but is"
+ f" {shape_list(layer_head_mask)}"
+ ),
+ )
+
+ attn_weights = tf.reshape(layer_head_mask, (1, -1, 1, 1)) * tf.reshape(
+ attn_weights, (bsz, self.num_heads, tgt_len, src_len)
+ )
+ attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len))
+
+ attn_probs = self.dropout(attn_weights, training=training)
+ attn_output = tf.matmul(attn_probs, value_states)
+
+ tf.debugging.assert_equal(
+ shape_list(attn_output),
+ [bsz * self.num_heads, tgt_len, self.head_dim],
+ message=(
+ f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is"
+ f" {shape_list(attn_output)}"
+ ),
+ )
+
+ attn_output = tf.transpose(
+ tf.reshape(attn_output, (bsz, self.num_heads, tgt_len, self.head_dim)), (0, 2, 1, 3)
+ )
+ attn_output = tf.reshape(attn_output, (bsz, tgt_len, embed_dim))
+
+ attn_output = self.out_proj(attn_output)
+ attn_weights: tf.Tensor = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len))
+
+ return attn_output, attn_weights, past_key_value
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "k_proj", None) is not None:
+ with tf.name_scope(self.k_proj.name):
+ self.k_proj.build([None, None, self.embed_dim])
+ if getattr(self, "q_proj", None) is not None:
+ with tf.name_scope(self.q_proj.name):
+ self.q_proj.build([None, None, self.embed_dim])
+ if getattr(self, "v_proj", None) is not None:
+ with tf.name_scope(self.v_proj.name):
+ self.v_proj.build([None, None, self.embed_dim])
+ if getattr(self, "out_proj", None) is not None:
+ with tf.name_scope(self.out_proj.name):
+ self.out_proj.build([None, None, self.embed_dim])
+
+
+class TFOPTDecoderLayer(keras.layers.Layer):
+ def __init__(self, config: OPTConfig, **kwargs):
+ super().__init__(**kwargs)
+ self.do_layer_norm_before = config.do_layer_norm_before
+ self.embed_dim = config.hidden_size
+ self.self_attn = TFOPTAttention(
+ embed_dim=self.embed_dim,
+ num_heads=config.num_attention_heads,
+ dropout=config.attention_dropout,
+ name="self_attn",
+ is_decoder=True,
+ )
+ self.dropout = keras.layers.Dropout(config.dropout)
+ self.activation_fn = get_tf_activation(config.activation_function)
+
+ self.self_attn_layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="self_attn_layer_norm")
+ self.fc1 = keras.layers.Dense(config.ffn_dim, name="fc1")
+ self.fc2 = keras.layers.Dense(self.embed_dim, name="fc2")
+ self.final_layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="final_layer_norm")
+ self.config = config
+
+ def call(
+ self,
+ hidden_states: tf.Tensor,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ layer_head_mask: tf.Tensor | None = None,
+ past_key_value: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
+ training: Optional[bool] = False,
+ output_attentions: Optional[bool] = False,
+ use_cache: Optional[bool] = False,
+ ) -> Tuple[tf.Tensor, tf.Tensor, Tuple[Tuple[tf.Tensor]]]:
+ """
+ Args:
+ hidden_states (`tf.Tensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
+ attention_mask (`tf.Tensor`, *optional*): attention mask of size
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
+ layer_head_mask (`tf.Tensor`, *optional*): mask for attention heads in a given layer of size
+ `(decoder_attention_heads,)`
+ past_key_value (`Tuple(tf.Tensor)`, *optional*): cached past key and value projection states
+ training (`bool`, *optional*, defaults to `False`):
+ Whether or not to use the model in training mode (some modules like dropout modules have different
+ behaviors between training and evaluation).
+ """
+ residual = hidden_states
+
+ # 125m, 1.7B, ..., 175B applies layer norm BEFORE attention
+ if self.do_layer_norm_before:
+ hidden_states = self.self_attn_layer_norm(hidden_states)
+
+ # Self Attention
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
+
+ # add present self-attn cache to positions 1,2 of present_key_value tuple
+ hidden_states, self_attn_weights, present_key_value = self.self_attn(
+ hidden_states=hidden_states,
+ past_key_value=self_attn_past_key_value,
+ attention_mask=attention_mask,
+ layer_head_mask=layer_head_mask,
+ )
+ hidden_states = self.dropout(hidden_states, training=training)
+ hidden_states = residual + hidden_states
+
+ # 350m applies layer norm AFTER attention
+ if not self.do_layer_norm_before:
+ hidden_states = self.self_attn_layer_norm(hidden_states)
+
+ # Fully Connected
+ residual = hidden_states
+ # 125m, 1.7B, ..., 175B apply layer norm BEFORE the feed-forward block
+ if self.do_layer_norm_before:
+ hidden_states = self.final_layer_norm(hidden_states)
+
+ hidden_states = self.fc1(hidden_states)
+ hidden_states = self.activation_fn(hidden_states)
+
+ hidden_states = self.fc2(hidden_states)
+ hidden_states = self.dropout(hidden_states, training=training)
+ hidden_states = residual + hidden_states
+
+ # 350m applies layer norm AFTER the feed-forward block
+ if not self.do_layer_norm_before:
+ hidden_states = self.final_layer_norm(hidden_states)
+
+ return (hidden_states, self_attn_weights, present_key_value)
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "self_attn", None) is not None:
+ with tf.name_scope(self.self_attn.name):
+ self.self_attn.build(None)
+ if getattr(self, "self_attn_layer_norm", None) is not None:
+ with tf.name_scope(self.self_attn_layer_norm.name):
+ self.self_attn_layer_norm.build([None, None, self.embed_dim])
+ if getattr(self, "fc1", None) is not None:
+ with tf.name_scope(self.fc1.name):
+ self.fc1.build([None, None, self.embed_dim])
+ if getattr(self, "fc2", None) is not None:
+ with tf.name_scope(self.fc2.name):
+ self.fc2.build([None, None, self.config.ffn_dim])
+ if getattr(self, "final_layer_norm", None) is not None:
+ with tf.name_scope(self.final_layer_norm.name):
+ self.final_layer_norm.build([None, None, self.embed_dim])
+
+
+OPT_START_DOCSTRING = r"""
+ This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
+ etc.)
+
+ This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
+ as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matters related to general usage and
+ behavior.
+
+
+
+ TensorFlow models and layers in `transformers` accept two formats as input:
+
+ - having all inputs as keyword arguments (like PyTorch models), or
+ - having all inputs as a list, tuple or dict in the first positional argument.
+
+ The reason the second format is supported is that Keras methods prefer this format when passing inputs to models
+ and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just
+ pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second
+ format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with
+ the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first
+ positional argument:
+
+ - a single Tensor with `input_ids` only and nothing else: `model(input_ids)`
+ - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
+ `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
+ - a dictionary with one or several input Tensors associated to the input names given in the docstring:
+ `model({"input_ids": input_ids, "token_type_ids": token_type_ids})`
+
+ Note that when creating models and layers with
+ [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry
+ about any of this, as you can just pass inputs like you would to any other Python function!
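+
+ As a quick, purely illustrative sketch of the three formats (the `facebook/opt-350m` checkpoint below is only an
+ example; any OPT checkpoint works the same way):
+
+ ```python
+ >>> from transformers import AutoTokenizer, TFOPTModel
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m")
+ >>> model = TFOPTModel.from_pretrained("facebook/opt-350m")
+ >>> inputs = tokenizer("Hello, world!", return_tensors="tf")
+
+ >>> # 1. keyword arguments
+ >>> out = model(input_ids=inputs["input_ids"], attention_mask=inputs["attention_mask"])
+ >>> # 2. a list/tuple in the first positional argument, in docstring order
+ >>> out = model([inputs["input_ids"], inputs["attention_mask"]])
+ >>> # 3. a dictionary in the first positional argument
+ >>> out = model({"input_ids": inputs["input_ids"], "attention_mask": inputs["attention_mask"]})
+ ```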
+
+
+
+ Args:
+ config ([`OPTConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+
+@add_start_docstrings(
+ "The bare OPT Model outputting raw hidden-states without any specific head on top.",
+ OPT_START_DOCSTRING,
+)
+class TFOPTPreTrainedModel(TFPreTrainedModel):
+ """
+ TFOPT Pretrained Model that inherits from transformers.TFPreTrainedModel
+
+ Args:
+ config: OPTConfig
+ """
+
+ config_class = OPTConfig
+ base_model_prefix = "model"
+
+
+OPT_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`tf.Tensor` of shape `({0})`):
+ Indices of input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`tf.Tensor` of shape `({0})`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ head_mask (`tf.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ past_key_values (`Tuple[Tuple[tf.Tensor]]` of length `config.n_layers`):
+ contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+ use_cache (`bool`, *optional*, defaults to `True`):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
+ `past_key_values`). Set to `False` during training, `True` during generation.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the
+ config will be used instead.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
+ used instead.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in
+ eager mode, in graph mode the value will always be set to `True`.
+ training (`bool`, *optional*, defaults to `False`):
+ Whether or not to use the model in training mode (some modules like dropout modules have different
+ behaviors between training and evaluation).
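+
+ A minimal sketch of how `past_key_values` and `use_cache` interact during incremental decoding (the checkpoint
+ name and the hard-coded next token below are purely illustrative):
+
+ ```python
+ >>> import tensorflow as tf
+ >>> from transformers import AutoTokenizer, TFOPTModel
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m")
+ >>> model = TFOPTModel.from_pretrained("facebook/opt-350m")
+
+ >>> prompt = tokenizer("Hello, my name is", return_tensors="tf")
+ >>> out = model(**prompt, use_cache=True)
+ >>> past = out.past_key_values
+
+ >>> # on the next step only the newest token is fed; the cache supplies the earlier positions
+ >>> next_token = tf.constant([[tokenizer.eos_token_id]])
+ >>> out = model(input_ids=next_token, past_key_values=past, use_cache=True)
+ ```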
+"""
+
+
+@keras_serializable
+class TFOPTDecoder(keras.layers.Layer):
+ config_class = OPTConfig
+
+ def __init__(self, config: OPTConfig, **kwargs):
+ super().__init__(**kwargs)
+ self.config = config
+ self.padding_idx = config.pad_token_id
+ self.layerdrop = config.layerdrop
+ num_embeddings = config.max_position_embeddings
+ self.embed_tokens = TFSharedEmbeddings(
+ config.vocab_size, config.word_embed_proj_dim, config.pad_token_id, name="embed_tokens"
+ )
+ self.embed_positions = TFOPTLearnedPositionalEmbedding(
+ num_embeddings,
+ config.hidden_size,
+ name="embed_positions",
+ )
+
+ # Note that the only purpose of `config._remove_final_layer_norm` is to keep backward compatibility
+ # with checkpoints that have been fine-tuned before transformers v4.20.1
+ # see https://github.com/facebookresearch/metaseq/pull/164
+ if config.do_layer_norm_before and not config._remove_final_layer_norm:
+ self.final_layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="final_layer_norm")
+ else:
+ self.final_layer_norm = None
+
+ if config.word_embed_proj_dim != config.hidden_size:
+ self.project_out = keras.layers.Dense(config.word_embed_proj_dim, name="project_out", use_bias=False)
+ self.project_in = keras.layers.Dense(config.hidden_size, name="project_in", use_bias=False)
+
+ else:
+ self.project_in = None
+ self.project_out = None
+
+ self.layers = [TFOPTDecoderLayer(config, name=f"layers.{i}") for i in range(config.num_hidden_layers)]
+ self.dropout = keras.layers.Dropout(config.dropout)
+
+ def get_embed_tokens(self):
+ return self.embed_tokens
+
+ def set_embed_tokens(self, embed_tokens):
+ self.embed_tokens = embed_tokens
+
+ def set_input_embeddings(self, new_embeddings):
+ self.embed_tokens.vocab_size = new_embeddings.shape[0]
+ self.embed_tokens.weight = new_embeddings
+
+ def get_input_embeddings(self):
+ return self.embed_tokens
+
+ def _prepare_decoder_attention_mask(self, attention_mask, input_shape, past_key_values_length):
+ # create causal mask
+ # # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
+ _, seq_length = input_shape
+ tf.debugging.assert_equal(
+ seq_length + past_key_values_length,
+ shape_list(attention_mask)[1],
+ message="Attention mask shape should be (batch_size, seq_length + past_key_values_length)"
+ f" but is {shape_list(attention_mask)[1]} with input_ids shape {input_shape} and past length"
+ f" {past_key_values_length}.",
+ )
+
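+ # `_expand_mask` converts the 2D padding mask into an additive [bsz, 1, tgt_seq_len, src_seq_len] mask with
+ # large negative values at padded positions; for prompts longer than one token it is summed with the causal mask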
+ expanded_attn_mask = _expand_mask(attention_mask, tgt_len=input_shape[-1])
+ if seq_length > 1:
+ combined_attention_mask = (
+ _make_causal_mask(input_shape, past_key_values_length=past_key_values_length) + expanded_attn_mask
+ )
+ else:
+ combined_attention_mask = expanded_attn_mask
+
+ return combined_attention_mask
+
+ @unpack_inputs
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ head_mask: np.ndarray | tf.Tensor | None = None,
+ past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ training: Optional[bool] = False,
+ ) -> Union[TFBaseModelOutputWithPast, Tuple[tf.Tensor]]:
+ r"""
+ Args:
+ input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
+ provide it.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+
+ head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ past_key_values (`Tuple[Tuple[tf.Tensor]]` of length `config.n_layers` with each tuple having 2 tuples each of which has 2 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
+ Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up
+ decoding.
+
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
+ that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
+ all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+ inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
+ model's internal embedding lookup matrix.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
+ for more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+ training (`bool`, *optional*, defaults to `False`):
+ Whether or not to use the model in training mode (some modules like dropout modules have different
+ behaviors between training and evaluation).
+ """
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if input_ids is not None and inputs_embeds is not None:
+ raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
+ elif input_ids is not None:
+ input_shape = shape_list(input_ids)
+ elif inputs_embeds is not None:
+ input_shape = shape_list(inputs_embeds)[:-1]
+ else:
+ raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
+
+ past_key_values_length = shape_list(past_key_values[0][0])[2] if past_key_values is not None else 0
+
+ if inputs_embeds is None:
+ check_embeddings_within_bounds(input_ids, self.embed_tokens.vocab_size)
+ inputs_embeds = self.embed_tokens(input_ids)
+
+ if attention_mask is None:
+ attention_mask = tf.ones((input_shape[0], input_shape[1] + past_key_values_length), dtype=tf.bool)
+ else:
+ tf.debugging.assert_equal(
+ shape_list(attention_mask)[1],
+ past_key_values_length + input_shape[1],
+ message=(
+ f"The provided attention mask has length {tf.shape(attention_mask)[1]}, but its length should be "
+ f"{past_key_values_length + input_shape[1]} (sum of the lengths of current and past inputs)"
+ ),
+ )
+ pos_embeds = self.embed_positions(attention_mask, past_key_values_length)
+
+ attention_mask = self._prepare_decoder_attention_mask(attention_mask, input_shape, past_key_values_length)
+
+ if self.project_in is not None:
+ inputs_embeds = self.project_in(inputs_embeds)
+
+ hidden_states = inputs_embeds + pos_embeds
+
+ # decoder layers
+ all_hidden_states = () if output_hidden_states else None
+ all_self_attns = () if output_attentions else None
+ present_key_values = () if use_cache else None
+
+ # check if head_mask has a correct number of layers specified if desired
+ for attn_mask_name, attn_mask in [("head_mask", head_mask)]:
+ if attn_mask is not None:
+ tf.debugging.assert_equal(
+ shape_list(attn_mask)[0],
+ len(self.layers),
+ message=(
+ f"The {attn_mask_name} should be specified for {len(self.layers)} layers, but it is for"
+ f" {shape_list(attn_mask)[0]}."
+ ),
+ )
+
+ for idx, decoder_layer in enumerate(self.layers):
+ if output_hidden_states:
+ all_hidden_states += (hidden_states,)
+
+ past_key_value = past_key_values[idx] if past_key_values is not None else None
+
+ hidden_states, layer_self_attn, present_key_value = decoder_layer(
+ hidden_states,
+ attention_mask=attention_mask,
+ layer_head_mask=head_mask[idx] if head_mask is not None else None,
+ past_key_value=past_key_value,
+ )
+
+ if use_cache:
+ present_key_values += (present_key_value,)
+
+ if output_attentions:
+ all_self_attns += (layer_self_attn,)
+
+ if self.final_layer_norm is not None:
+ hidden_states = self.final_layer_norm(hidden_states)
+
+ if self.project_out is not None:
+ hidden_states = self.project_out(hidden_states)
+
+ if output_hidden_states:
+ all_hidden_states += (hidden_states,)
+
+ if not return_dict:
+ return tuple(
+ v for v in [hidden_states, present_key_values, all_hidden_states, all_self_attns] if v is not None
+ )
+
+ else:
+ return TFBaseModelOutputWithPast(
+ last_hidden_state=hidden_states,
+ past_key_values=present_key_values,
+ hidden_states=all_hidden_states,
+ attentions=all_self_attns,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "embed_tokens", None) is not None:
+ with tf.name_scope(self.embed_tokens.name):
+ self.embed_tokens.build(None)
+ if getattr(self, "embed_positions", None) is not None:
+ with tf.name_scope(self.embed_positions.name):
+ self.embed_positions.build(None)
+ if getattr(self, "final_layer_norm", None) is not None:
+ with tf.name_scope(self.final_layer_norm.name):
+ self.final_layer_norm.build([None, None, self.config.hidden_size])
+ if getattr(self, "project_out", None) is not None:
+ with tf.name_scope(self.project_out.name):
+ self.project_out.build([None, None, self.config.hidden_size])
+ if getattr(self, "project_in", None) is not None:
+ with tf.name_scope(self.project_in.name):
+ self.project_in.build([None, None, self.config.word_embed_proj_dim])
+ if getattr(self, "layers", None) is not None:
+ for layer in self.layers:
+ with tf.name_scope(layer.name):
+ layer.build(None)
+
+
+@keras_serializable
+class TFOPTMainLayer(keras.layers.Layer):
+ config_class = OPTConfig
+
+ def __init__(self, config: OPTConfig, **kwargs):
+ super().__init__(**kwargs)
+ self.config = config
+ self.decoder = TFOPTDecoder(config, name="decoder")
+
+ def get_input_embeddings(self):
+ return self.decoder.embed_tokens
+
+ def set_input_embeddings(self, new_embeddings):
+ self.decoder.set_input_embeddings(new_embeddings)
+
+ @unpack_inputs
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ head_mask: np.ndarray | tf.Tensor | None = None,
+ past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ training: Optional[bool] = False,
+ **kwargs,
+ ) -> Union[TFBaseModelOutputWithPast, Tuple[tf.Tensor]]:
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.decoder(
+ input_ids,
+ attention_mask=attention_mask,
+ head_mask=head_mask,
+ past_key_values=past_key_values,
+ inputs_embeds=inputs_embeds,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+
+ if not return_dict:
+ return outputs
+
+ return TFBaseModelOutputWithPast(
+ last_hidden_state=outputs.last_hidden_state,
+ past_key_values=outputs.past_key_values,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "decoder", None) is not None:
+ with tf.name_scope(self.decoder.name):
+ self.decoder.build(None)
+
+
+@add_start_docstrings(
+ "The bare TF OPT Model outputting raw hidden-states without any specific head on top.",
+ OPT_START_DOCSTRING,
+)
+@keras_serializable
+class TFOPTModel(TFOPTPreTrainedModel):
+ config_class = OPTConfig
+
+ def __init__(self, config: OPTConfig, **kwargs):
+ super().__init__(config, **kwargs)
+ self.config = config
+ self.model = TFOPTMainLayer(config, name="model")
+
+ def get_input_embeddings(self):
+ return self.model.decoder.embed_tokens
+
+ def set_input_embeddings(self, new_embeddings):
+ self.model.set_input_embeddings(new_embeddings)
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(OPT_INPUTS_DOCSTRING)
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=TFBaseModelOutputWithPast,
+ config_class=_CONFIG_FOR_DOC,
+ expected_output=_EXPECTED_OUTPUT_SHAPE,
+ )
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ head_mask: np.ndarray | tf.Tensor | None = None,
+ past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ training: Optional[bool] = False,
+ **kwargs,
+ ) -> Union[TFBaseModelOutputWithPast, Tuple[tf.Tensor]]:
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.model(
+ input_ids,
+ attention_mask=attention_mask,
+ head_mask=head_mask,
+ past_key_values=past_key_values,
+ inputs_embeds=inputs_embeds,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+
+ if not return_dict:
+ return outputs
+
+ return TFBaseModelOutputWithPast(
+ last_hidden_state=outputs.last_hidden_state,
+ past_key_values=outputs.past_key_values,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+ def serving_output(self, output):
+ pkv = tf.tuple(output.past_key_values)[1] if self.config.use_cache else None
+ hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
+ attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
+
+ return TFBaseModelOutputWithPast(
+ last_hidden_state=output.last_hidden_state,
+ past_key_values=pkv,
+ hidden_states=hs,
+ attentions=attns,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "model", None) is not None:
+ with tf.name_scope(self.model.name):
+ self.model.build(None)
+
+
+@add_start_docstrings(
+ """
+ The OPT Model transformer with a language modeling head on top.
+ """,
+ OPT_START_DOCSTRING,
+)
+@keras_serializable
+class TFOPTForCausalLM(TFOPTPreTrainedModel, TFCausalLanguageModelingLoss):
+ config_class = OPTConfig
+
+ def __init__(self, config: OPTConfig, **kwargs):
+ super().__init__(config, **kwargs)
+ self.config = config
+ self.model = TFOPTMainLayer(config, name="model")
+
+ def get_output_embeddings(self):
+ return self.model.get_input_embeddings()
+
+ def prepare_inputs_for_generation(self, inputs, past_key_values=None, use_cache=None, **kwargs):
+ attention_mask = kwargs.get("attention_mask", None)
+
+ # only use the last token for input_ids if past_key_values is defined
+ if past_key_values:
+ inputs = tf.expand_dims(inputs[:, -1], -1)
+
+ return {
+ "input_ids": inputs,
+ "attention_mask": attention_mask,
+ "past_key_values": past_key_values,
+ "use_cache": use_cache,
+ }
+
+ @unpack_inputs
+ @replace_return_docstrings(output_type=TFCausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=TFCausalLMOutputWithPast,
+ config_class=_CONFIG_FOR_DOC,
+ expected_output=_CAUSAL_LM_EXPECTED_OUTPUT,
+ )
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ position_ids: np.ndarray | tf.Tensor | None = None,
+ head_mask: np.ndarray | tf.Tensor | None = None,
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
+ labels: np.ndarray | tf.Tensor | None = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ training: Optional[bool] = False,
+ **kwargs,
+ ) -> Union[TFCausalLMOutputWithPast, Tuple[tf.Tensor]]:
+ r"""
+ Args:
+ input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
+ provide it.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ head_mask (`tf.Tensor` of shape `(num_hidden_layers, num_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ past_key_values (`Tuple[Tuple[tf.Tensor]]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+ Tuple of `Tuple[tf.Tensor]` of length `config.n_layers`, with each tuple having 2 tensors of shape
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)`.
+
+ Contains pre-computed key and value hidden-states of the self-attention blocks that can be used (see
+ `past_key_values` input) to speed up sequential decoding.
+
+ If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't
+ have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids`
+ of shape `(batch_size, sequence_length)`.
+ inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
+ This is useful if you want more control over how to convert `input_ids` indices into associated vectors
+ than the model's internal embedding lookup matrix.
+ labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the language modeling loss. Indices should either be in `[0, ...,
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
+ (masked); the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
+ use_cache (`bool`, *optional*):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
+ (see `past_key_values`).
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
+ for more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
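+
+ Returns:
+
+ Example (a minimal usage sketch; the `facebook/opt-350m` checkpoint is only an illustration):
+
+ ```python
+ >>> from transformers import AutoTokenizer, TFOPTForCausalLM
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m")
+ >>> model = TFOPTForCausalLM.from_pretrained("facebook/opt-350m")
+
+ >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="tf")
+ >>> outputs = model(**inputs, labels=inputs["input_ids"])
+ >>> loss, logits = outputs.loss, outputs.logits
+ ```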
+ """
+
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.model(
+ input_ids=input_ids,
+ past_key_values=past_key_values,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+
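+ # the LM head is tied to the input embeddings: calling the shared embedding in "linear" mode multiplies the
+ # hidden states by the transposed embedding matrix to produce vocabulary logits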
+ logits = self.model.decoder.embed_tokens(outputs[0], mode="linear")
+ loss = None
+ if labels is not None:
+ # shift labels to the left and cut last logit token
+ shifted_logits = logits[:, :-1]
+ labels = labels[:, 1:]
+ loss = self.hf_compute_loss(labels, shifted_logits)
+
+ if not return_dict:
+ output = (logits,) + outputs[1:]
+ return ((loss,) + output) if loss is not None else output
+
+ return TFCausalLMOutputWithPast(
+ loss=loss,
+ logits=logits,
+ past_key_values=outputs.past_key_values,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+ def serving_output(self, output):
+ pkv = tf.tuple(output.past_key_values)[1] if self.config.use_cache else None
+ hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
+ attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
+
+ return TFCausalLMOutputWithPast(
+ past_key_values=pkv,
+ hidden_states=hs,
+ attentions=attns,
+ loss=output.loss,
+ logits=output.logits,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "model", None) is not None:
+ with tf.name_scope(self.model.name):
+ self.model.build(None)
diff --git a/venv/lib/python3.10/site-packages/transformers/models/qwen2_moe/__init__.py b/venv/lib/python3.10/site-packages/transformers/models/qwen2_moe/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..f083b454d554a09ef2c0479ef7ae7053cc6e023c
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/qwen2_moe/__init__.py
@@ -0,0 +1,62 @@
+# Copyright 2024 The Qwen Team and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+from ...utils import (
+ OptionalDependencyNotAvailable,
+ _LazyModule,
+ is_torch_available,
+)
+
+
+_import_structure = {
+ "configuration_qwen2_moe": ["QWEN2MOE_PRETRAINED_CONFIG_ARCHIVE_MAP", "Qwen2MoeConfig"],
+}
+
+
+try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_qwen2_moe"] = [
+ "Qwen2MoeForCausalLM",
+ "Qwen2MoeModel",
+ "Qwen2MoePreTrainedModel",
+ "Qwen2MoeForSequenceClassification",
+ ]
+
+
+if TYPE_CHECKING:
+ from .configuration_qwen2_moe import QWEN2MOE_PRETRAINED_CONFIG_ARCHIVE_MAP, Qwen2MoeConfig
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_qwen2_moe import (
+ Qwen2MoeForCausalLM,
+ Qwen2MoeForSequenceClassification,
+ Qwen2MoeModel,
+ Qwen2MoePreTrainedModel,
+ )
+
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
diff --git a/venv/lib/python3.10/site-packages/transformers/models/qwen2_moe/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/qwen2_moe/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..31079d7fa1448dc1cb0081a72d864fb2a5061ba0
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/qwen2_moe/__pycache__/__init__.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/qwen2_moe/__pycache__/configuration_qwen2_moe.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/qwen2_moe/__pycache__/configuration_qwen2_moe.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2725d83bf3e2cf519a743bcb415990b3f3957c6f
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/qwen2_moe/__pycache__/configuration_qwen2_moe.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/qwen2_moe/__pycache__/modeling_qwen2_moe.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/qwen2_moe/__pycache__/modeling_qwen2_moe.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..267256dbbbe9ef6da36a22c820ed2d3aabde5074
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/qwen2_moe/__pycache__/modeling_qwen2_moe.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/qwen2_moe/configuration_qwen2_moe.py b/venv/lib/python3.10/site-packages/transformers/models/qwen2_moe/configuration_qwen2_moe.py
new file mode 100644
index 0000000000000000000000000000000000000000..e3f516ed9c2de4d725b3f8f329768ef71916cb62
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/qwen2_moe/configuration_qwen2_moe.py
@@ -0,0 +1,175 @@
+# coding=utf-8
+# Copyright 2024 The Qwen team, Alibaba Group and the HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" Qwen2MoE model configuration"""
+
+from ...configuration_utils import PretrainedConfig
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+QWEN2MOE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
+ "Qwen/Qwen1.5-MoE-A2.7B": "https://huggingface.co/Qwen/Qwen1.5-MoE-A2.7B/resolve/main/config.json",
+}
+
+
+class Qwen2MoeConfig(PretrainedConfig):
+ r"""
+ This is the configuration class to store the configuration of a [`Qwen2MoeModel`]. It is used to instantiate a
+ Qwen2MoE model according to the specified arguments, defining the model architecture. Instantiating a configuration
+ with the defaults will yield a similar configuration to that of
+ Qwen1.5-MoE-A2.7B [Qwen/Qwen1.5-MoE-A2.7B](https://huggingface.co/Qwen/Qwen1.5-MoE-A2.7B).
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+
+ Args:
+ vocab_size (`int`, *optional*, defaults to 151936):
+ Vocabulary size of the Qwen2MoE model. Defines the number of different tokens that can be represented by the
+ `input_ids` passed when calling [`Qwen2MoeModel`]
+ hidden_size (`int`, *optional*, defaults to 2048):
+ Dimension of the hidden representations.
+ intermediate_size (`int`, *optional*, defaults to 5632):
+ Dimension of the MLP representations.
+ num_hidden_layers (`int`, *optional*, defaults to 24):
+ Number of hidden layers in the Transformer encoder.
+ num_attention_heads (`int`, *optional*, defaults to 16):
+ Number of attention heads for each attention layer in the Transformer encoder.
+ num_key_value_heads (`int`, *optional*, defaults to 16):
+ This is the number of key_value heads that should be used to implement Grouped Query Attention. If
+ `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
+ `num_key_value_heads=1`, the model will use Multi Query Attention (MQA); otherwise GQA is used. When
+ converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
+ by mean-pooling all the original heads within that group. For more details, check out [this
+ paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, it will default to `16`.
+ hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
+ The non-linear activation function (function or string) in the decoder.
+ max_position_embeddings (`int`, *optional*, defaults to 32768):
+ The maximum sequence length that this model might ever be used with.
+ initializer_range (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+ rms_norm_eps (`float`, *optional*, defaults to 1e-06):
+ The epsilon used by the rms normalization layers.
+ use_cache (`bool`, *optional*, defaults to `True`):
+ Whether or not the model should return the last key/values attentions (not used by all models). Only
+ relevant if `config.is_decoder=True`.
+ tie_word_embeddings (`bool`, *optional*, defaults to `False`):
+ Whether the model's input and output word embeddings should be tied.
+ rope_theta (`float`, *optional*, defaults to 10000.0):
+ The base period of the RoPE embeddings.
+ use_sliding_window (`bool`, *optional*, defaults to `False`):
+ Whether to use sliding window attention.
+ sliding_window (`int`, *optional*, defaults to 4096):
+ Sliding window attention (SWA) window size. If not specified, will default to `4096`.
+ max_window_layers (`int`, *optional*, defaults to 28):
+ The number of layers that use SWA (Sliding Window Attention). The bottom layers use SWA while the top use full attention.
+ attention_dropout (`float`, *optional*, defaults to 0.0):
+ The dropout ratio for the attention probabilities.
+ decoder_sparse_step (`int`, *optional*, defaults to 1):
+ The frequency of the MoE layer.
+ moe_intermediate_size (`int`, *optional*, defaults to 1408):
+ Intermediate size of the routed expert.
+ shared_expert_intermediate_size (`int`, *optional*, defaults to 5632):
+ Intermediate size of the shared expert.
+ num_experts_per_tok (`int`, *optional*, defaults to 4):
+ Number of selected experts.
+ num_experts (`int`, *optional*, defaults to 60):
+ Number of routed experts.
+ norm_topk_prob (`bool`, *optional*, defaults to `False`):
+ Whether to normalize the topk probabilities.
+ output_router_logits (`bool`, *optional*, defaults to `False`):
+ Whether or not the router logits should be returned by the model. Enabling this will also
+ allow the model to output the auxiliary loss, including the load balancing loss and the router z-loss.
+ router_aux_loss_coef (`float`, *optional*, defaults to 0.001):
+ The aux loss factor for the total loss.
+
+ ```python
+ >>> from transformers import Qwen2MoeModel, Qwen2MoeConfig
+
+ >>> # Initializing a Qwen2MoE style configuration
+ >>> configuration = Qwen2MoeConfig()
+
+ >>> # Initializing a model from the Qwen1.5-MoE-A2.7B style configuration
+ >>> model = Qwen2MoeModel(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```"""
+
+ model_type = "qwen2_moe"
+ keys_to_ignore_at_inference = ["past_key_values"]
+
+ def __init__(
+ self,
+ vocab_size=151936,
+ hidden_size=2048,
+ intermediate_size=5632,
+ num_hidden_layers=24,
+ num_attention_heads=16,
+ num_key_value_heads=16,
+ hidden_act="silu",
+ max_position_embeddings=32768,
+ initializer_range=0.02,
+ rms_norm_eps=1e-6,
+ use_cache=True,
+ tie_word_embeddings=False,
+ rope_theta=10000.0,
+ use_sliding_window=False,
+ sliding_window=4096,
+ max_window_layers=28,
+ attention_dropout=0.0,
+ decoder_sparse_step=1,
+ moe_intermediate_size=1408,
+ shared_expert_intermediate_size=5632,
+ num_experts_per_tok=4,
+ num_experts=60,
+ norm_topk_prob=False,
+ output_router_logits=False,
+ router_aux_loss_coef=0.001,
+ **kwargs,
+ ):
+ self.vocab_size = vocab_size
+ self.max_position_embeddings = max_position_embeddings
+ self.hidden_size = hidden_size
+ self.intermediate_size = intermediate_size
+ self.num_hidden_layers = num_hidden_layers
+ self.num_attention_heads = num_attention_heads
+ self.use_sliding_window = use_sliding_window
+ self.sliding_window = sliding_window
+ self.max_window_layers = max_window_layers
+
+ self.num_key_value_heads = num_key_value_heads
+ self.hidden_act = hidden_act
+ self.initializer_range = initializer_range
+ self.rms_norm_eps = rms_norm_eps
+ self.use_cache = use_cache
+ self.rope_theta = rope_theta
+ self.attention_dropout = attention_dropout
+
+ # MoE arguments
+ self.decoder_sparse_step = decoder_sparse_step
+ self.moe_intermediate_size = moe_intermediate_size
+ self.shared_expert_intermediate_size = shared_expert_intermediate_size
+ self.num_experts_per_tok = num_experts_per_tok
+ self.num_experts = num_experts
+ self.norm_topk_prob = norm_topk_prob
+ self.output_router_logits = output_router_logits
+ self.router_aux_loss_coef = router_aux_loss_coef
+
+ super().__init__(
+ tie_word_embeddings=tie_word_embeddings,
+ **kwargs,
+ )
diff --git a/venv/lib/python3.10/site-packages/transformers/models/qwen2_moe/modeling_qwen2_moe.py b/venv/lib/python3.10/site-packages/transformers/models/qwen2_moe/modeling_qwen2_moe.py
new file mode 100644
index 0000000000000000000000000000000000000000..70072c91720a57f44613e404757003973e2b73db
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/qwen2_moe/modeling_qwen2_moe.py
@@ -0,0 +1,1595 @@
+# coding=utf-8
+# Copyright 2024 The Qwen team, Alibaba Group and the HuggingFace Inc. team. All rights reserved.
+#
+# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
+# and OPT implementations in this library. It has been modified from its
+# original forms to accommodate minor architectural differences compared
+# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" PyTorch Qwen2MoE model."""
+import inspect
+import math
+import warnings
+from typing import List, Optional, Tuple, Union
+
+import torch
+import torch.nn.functional as F
+import torch.utils.checkpoint
+from torch import nn
+from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
+
+from ...activations import ACT2FN
+from ...cache_utils import Cache, DynamicCache
+from ...modeling_attn_mask_utils import _prepare_4d_causal_attention_mask, _prepare_4d_causal_attention_mask_for_sdpa
+from ...modeling_outputs import MoeCausalLMOutputWithPast, MoeModelOutputWithPast, SequenceClassifierOutputWithPast
+from ...modeling_utils import PreTrainedModel
+from ...utils import (
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ is_flash_attn_2_available,
+ is_flash_attn_greater_or_equal_2_10,
+ logging,
+ replace_return_docstrings,
+)
+from .configuration_qwen2_moe import Qwen2MoeConfig
+
+
+if is_flash_attn_2_available():
+ from flash_attn import flash_attn_func, flash_attn_varlen_func
+ from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input # noqa
+
+ _flash_supports_window_size = "window_size" in list(inspect.signature(flash_attn_func).parameters)
+
+logger = logging.get_logger(__name__)
+
+_CHECKPOINT_FOR_DOC = "Qwen/Qwen1.5-MoE-A2.7B"
+_CONFIG_FOR_DOC = "Qwen2MoeConfig"
+
+QWEN2MOE_PRETRAINED_MODEL_ARCHIVE_LIST = [
+ "Qwen/Qwen1.5-MoE-A2.7B",
+ # See all Qwen2 models at https://huggingface.co/models?filter=qwen2
+]
+
+
+# Copied from transformers.models.mixtral.modeling_mixtral.load_balancing_loss_func
+def load_balancing_loss_func(
+ gate_logits: torch.Tensor, num_experts: Optional[int] = None, top_k=2, attention_mask: Optional[torch.Tensor] = None
+) -> float:
+ r"""
+ Computes auxiliary load balancing loss as in Switch Transformer - implemented in Pytorch.
+
+ See Switch Transformer (https://arxiv.org/abs/2101.03961) for more details. This function implements the loss
+ function presented in equations (4) - (6) of the paper. It aims at penalizing cases where the routing between
+ experts is too unbalanced.
+
+ Args:
+ gate_logits (Union[`torch.Tensor`, Tuple[torch.Tensor]]):
+ Logits from the `gate`, should be a tuple of model.config.num_hidden_layers tensors of
+ shape [batch_size X sequence_length, num_experts].
+ attention_mask (`torch.Tensor`, *optional*):
+ The attention_mask used in the forward function, of
+ shape [batch_size X sequence_length] if not None.
+ num_experts (`int`, *optional*):
+ Number of experts.
+
+ Returns:
+ The auxiliary loss.
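+
+ A small, purely illustrative call (random logits for 2 layers, 8 tokens and 4 experts):
+
+ ```python
+ >>> import torch
+ >>> gate_logits = tuple(torch.randn(8, 4) for _ in range(2))  # one tensor per layer
+ >>> aux_loss = load_balancing_loss_func(gate_logits, num_experts=4, top_k=2)
+ ```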
+ """
+ if gate_logits is None or not isinstance(gate_logits, tuple):
+ return 0
+
+ if isinstance(gate_logits, tuple):
+ compute_device = gate_logits[0].device
+ concatenated_gate_logits = torch.cat([layer_gate.to(compute_device) for layer_gate in gate_logits], dim=0)
+
+ routing_weights = torch.nn.functional.softmax(concatenated_gate_logits, dim=-1)
+
+ _, selected_experts = torch.topk(routing_weights, top_k, dim=-1)
+
+ expert_mask = torch.nn.functional.one_hot(selected_experts, num_experts)
+
+ if attention_mask is None:
+ # Compute the percentage of tokens routed to each expert
+ tokens_per_expert = torch.mean(expert_mask.float(), dim=0)
+
+ # Compute the average probability of routing to these experts
+ router_prob_per_expert = torch.mean(routing_weights, dim=0)
+ else:
+ batch_size, sequence_length = attention_mask.shape
+ num_hidden_layers = concatenated_gate_logits.shape[0] // (batch_size * sequence_length)
+
+ # Compute the mask that masks all padding tokens as 0, with the same shape as expert_mask
+ expert_attention_mask = (
+ attention_mask[None, :, :, None, None]
+ .expand((num_hidden_layers, batch_size, sequence_length, top_k, num_experts))
+ .reshape(-1, top_k, num_experts)
+ .to(compute_device)
+ )
+
+ # Compute the percentage of tokens routed to each expert
+ tokens_per_expert = torch.sum(expert_mask.float() * expert_attention_mask, dim=0) / torch.sum(
+ expert_attention_mask, dim=0
+ )
+
+ # Compute the mask that masks all padding tokens as 0, with the same shape as tokens_per_expert
+ router_per_expert_attention_mask = (
+ attention_mask[None, :, :, None]
+ .expand((num_hidden_layers, batch_size, sequence_length, num_experts))
+ .reshape(-1, num_experts)
+ .to(compute_device)
+ )
+
+ # Compute the average probability of routing to these experts
+ router_prob_per_expert = torch.sum(routing_weights * router_per_expert_attention_mask, dim=0) / torch.sum(
+ router_per_expert_attention_mask, dim=0
+ )
+
+ overall_loss = torch.sum(tokens_per_expert * router_prob_per_expert.unsqueeze(0))
+ return overall_loss * num_experts
+
+
+# Copied from transformers.models.llama.modeling_llama._get_unpad_data
+def _get_unpad_data(attention_mask):
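+ # helper for the flash-attention varlen kernels: from the 2D padding mask it derives the flat indices of the
+ # non-padded tokens, the cumulative sequence lengths (cu_seqlens) and the longest sequence in the batch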
+ seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
+ indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
+ max_seqlen_in_batch = seqlens_in_batch.max().item()
+ cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
+ return (
+ indices,
+ cu_seqlens,
+ max_seqlen_in_batch,
+ )
+
+
+# Copied from transformers.models.llama.modeling_llama.LlamaRMSNorm with Llama->Qwen2Moe
+class Qwen2MoeRMSNorm(nn.Module):
+ def __init__(self, hidden_size, eps=1e-6):
+ """
+ Qwen2MoeRMSNorm is equivalent to T5LayerNorm
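+ i.e. it rescales the input by the inverse root-mean-square, `weight * x / sqrt(mean(x**2) + eps)`, computing the
+ statistics in float32 and casting the result back to the input dtype.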
+ """
+ super().__init__()
+ self.weight = nn.Parameter(torch.ones(hidden_size))
+ self.variance_epsilon = eps
+
+ def forward(self, hidden_states):
+ input_dtype = hidden_states.dtype
+ hidden_states = hidden_states.to(torch.float32)
+ variance = hidden_states.pow(2).mean(-1, keepdim=True)
+ hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
+ return self.weight * hidden_states.to(input_dtype)
+
+
+# Copied from transformers.models.mistral.modeling_mistral.MistralRotaryEmbedding with Mistral->Qwen2Moe
+class Qwen2MoeRotaryEmbedding(nn.Module):
+ def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):
+ super().__init__()
+
+ self.dim = dim
+ self.max_position_embeddings = max_position_embeddings
+ self.base = base
+ inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2, dtype=torch.int64).float().to(device) / self.dim))
+ self.register_buffer("inv_freq", inv_freq, persistent=False)
+
+ # Build here to make `torch.jit.trace` work.
+ self._set_cos_sin_cache(
+ seq_len=max_position_embeddings, device=self.inv_freq.device, dtype=torch.get_default_dtype()
+ )
+
+ def _set_cos_sin_cache(self, seq_len, device, dtype):
+ self.max_seq_len_cached = seq_len
+ t = torch.arange(self.max_seq_len_cached, device=device, dtype=torch.int64).type_as(self.inv_freq)
+
+ freqs = torch.outer(t, self.inv_freq)
+ # Different from paper, but it uses a different permutation in order to obtain the same calculation
+ emb = torch.cat((freqs, freqs), dim=-1)
+ self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False)
+ self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False)
+
+ def forward(self, x, seq_len=None):
+ # x: [bs, num_attention_heads, seq_len, head_size]
+ if seq_len > self.max_seq_len_cached:
+ self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=x.dtype)
+
+ return (
+ self.cos_cached[:seq_len].to(dtype=x.dtype),
+ self.sin_cached[:seq_len].to(dtype=x.dtype),
+ )
+
+
+# Copied from transformers.models.llama.modeling_llama.rotate_half
+def rotate_half(x):
+ """Rotates half the hidden dims of the input."""
+ x1 = x[..., : x.shape[-1] // 2]
+ x2 = x[..., x.shape[-1] // 2 :]
+ return torch.cat((-x2, x1), dim=-1)
+
+
+# Copied from transformers.models.mistral.modeling_mistral.apply_rotary_pos_emb
+def apply_rotary_pos_emb(q, k, cos, sin, position_ids, unsqueeze_dim=1):
+ """Applies Rotary Position Embedding to the query and key tensors.
+
+ Args:
+ q (`torch.Tensor`): The query tensor.
+ k (`torch.Tensor`): The key tensor.
+ cos (`torch.Tensor`): The cosine part of the rotary embedding.
+ sin (`torch.Tensor`): The sine part of the rotary embedding.
+ position_ids (`torch.Tensor`):
+ The position indices of the tokens corresponding to the query and key tensors. For example, this can be
+ used to pass offset position ids when working with a KV-cache.
+ unsqueeze_dim (`int`, *optional*, defaults to 1):
+ The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
+ sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
+ that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
+ k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
+ cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
+ the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
+ Returns:
+ `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
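+
+ A shape-only sketch (all sizes below are arbitrary, and the rotary embedding is built with the same head dim):
+
+ ```python
+ >>> import torch
+ >>> q = torch.randn(1, 8, 5, 64)  # [batch, heads, seq_len, head_dim]
+ >>> k = torch.randn(1, 8, 5, 64)
+ >>> rotary = Qwen2MoeRotaryEmbedding(dim=64)
+ >>> cos, sin = rotary(q, seq_len=5)
+ >>> position_ids = torch.arange(5).unsqueeze(0)
+ >>> q_rot, k_rot = apply_rotary_pos_emb(q, k, cos, sin, position_ids)  # same shapes as q and k
+ ```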
+ """
+ cos = cos[position_ids].unsqueeze(unsqueeze_dim)
+ sin = sin[position_ids].unsqueeze(unsqueeze_dim)
+ q_embed = (q * cos) + (rotate_half(q) * sin)
+ k_embed = (k * cos) + (rotate_half(k) * sin)
+ return q_embed, k_embed
+
+
+# Modified from transformers.models.mistral.modeling_mistral.MistralMLP with Mistral->Qwen2Moe
+class Qwen2MoeMLP(nn.Module):
+ def __init__(self, config, intermediate_size=None):
+ super().__init__()
+ self.config = config
+ self.hidden_size = config.hidden_size
+ self.intermediate_size = intermediate_size
+ self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
+ self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
+ self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
+ self.act_fn = ACT2FN[config.hidden_act]
+
+ def forward(self, x):
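+ # gated feed-forward (SwiGLU when `hidden_act` is "silu"): the activated gate projection is multiplied
+ # elementwise with the up projection before being projected back to the hidden size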
+ return self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
+
+
+# Copied from transformers.models.llama.modeling_llama.repeat_kv
+def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
+ """
+ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
+ num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
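+
+ For example, a hypothetical input of shape `(2, 4, 10, 64)` with `n_rep=3` yields a tensor of shape
+ `(2, 12, 10, 64)`; with `n_rep=1` the input is returned unchanged.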
+ """
+ batch, num_key_value_heads, slen, head_dim = hidden_states.shape
+ if n_rep == 1:
+ return hidden_states
+ hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
+ return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
+
+
+# Copied from transformers.models.qwen2.modeling_qwen2.Qwen2Attention with Qwen2->Qwen2Moe
+class Qwen2MoeAttention(nn.Module):
+ """
+ Multi-headed attention from the 'Attention Is All You Need' paper. Modified to use sliding window attention, as in
+ Longformer and "Generating Long Sequences with Sparse Transformers".
+ """
+
+ def __init__(self, config: Qwen2MoeConfig, layer_idx: Optional[int] = None):
+ super().__init__()
+ self.config = config
+ self.layer_idx = layer_idx
+ if layer_idx is None:
+ logger.warning_once(
+ f"Instantiating {self.__class__.__name__} without passing `layer_idx` is not recommended and will lead "
+ "to errors during the forward call, if caching is used. Please make sure to provide a `layer_idx` "
+ "when creating this class."
+ )
+
+ self.hidden_size = config.hidden_size
+ self.num_heads = config.num_attention_heads
+ self.head_dim = self.hidden_size // self.num_heads
+ self.num_key_value_heads = config.num_key_value_heads
+ self.num_key_value_groups = self.num_heads // self.num_key_value_heads
+ self.max_position_embeddings = config.max_position_embeddings
+ self.rope_theta = config.rope_theta
+ self.is_causal = True
+ self.attention_dropout = config.attention_dropout
+
+ if (self.head_dim * self.num_heads) != self.hidden_size:
+ raise ValueError(
+ f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}"
+ f" and `num_heads`: {self.num_heads})."
+ )
+ self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=True)
+ self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=True)
+ self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=True)
+ self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)
+
+ self.rotary_emb = Qwen2MoeRotaryEmbedding(
+ self.head_dim,
+ max_position_embeddings=self.max_position_embeddings,
+ base=self.rope_theta,
+ )
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_value: Optional[Cache] = None,
+ output_attentions: bool = False,
+ use_cache: bool = False,
+ **kwargs,
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
+ if "padding_mask" in kwargs:
+ warnings.warn(
+ "Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure to use `attention_mask` instead."
+ )
+ bsz, q_len, _ = hidden_states.size()
+
+ query_states = self.q_proj(hidden_states)
+ key_states = self.k_proj(hidden_states)
+ value_states = self.v_proj(hidden_states)
+
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
+ key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
+ value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
+
+ kv_seq_len = key_states.shape[-2]
+ if past_key_value is not None:
+ if self.layer_idx is None:
+ raise ValueError(
+ f"The cache structure has changed since version v4.36. If you are using {self.__class__.__name__} "
+ "for auto-regressive decoding with k/v caching, please make sure to initialize the attention class "
+ "with a layer index."
+ )
+ kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
+ cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
+
+ if past_key_value is not None:
+ cache_kwargs = {"sin": sin, "cos": cos} # Specific to RoPE models
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
+
+ # repeat k/v heads if n_kv_heads < n_heads
+ key_states = repeat_kv(key_states, self.num_key_value_groups)
+ value_states = repeat_kv(value_states, self.num_key_value_groups)
+
+ attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)
+
+ if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len):
+ raise ValueError(
+ f"Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is"
+ f" {attn_weights.size()}"
+ )
+
+ if attention_mask is not None:
+ if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
+ raise ValueError(
+ f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}"
+ )
+
+ attn_weights = attn_weights + attention_mask
+
+ # upcast attention to fp32
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
+ attn_weights = nn.functional.dropout(attn_weights, p=self.attention_dropout, training=self.training)
+ attn_output = torch.matmul(attn_weights, value_states)
+
+ if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
+ raise ValueError(
+ f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is"
+ f" {attn_output.size()}"
+ )
+
+ attn_output = attn_output.transpose(1, 2).contiguous()
+ attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)
+
+ attn_output = self.o_proj(attn_output)
+
+ if not output_attentions:
+ attn_weights = None
+
+ return attn_output, attn_weights, past_key_value
+
+
+# Copied from transformers.models.qwen2.modeling_qwen2.Qwen2FlashAttention2 with Qwen2->Qwen2Moe
+class Qwen2MoeFlashAttention2(Qwen2MoeAttention):
+ """
+ Qwen2Moe flash attention module, following the Qwen2Moe attention module. This module inherits from `Qwen2MoeAttention`
+ as the weights of the module stay untouched. The only required change is in the forward pass, where it needs to
+ correctly call the public API of Flash Attention and handle padding tokens in case the input contains any.
+ Additionally, for sliding window attention, we apply SWA only to the bottom config.max_window_layers layers.
+ """
+
+ # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2.__init__
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+
+ # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1.
+ # flash_attn<2.1 generates a top-left aligned causal mask, while what is needed here is bottom-right alignment, which was made the default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0.
+ # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left).
+ self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_value: Optional[Cache] = None,
+ output_attentions: bool = False,
+ use_cache: bool = False,
+ **kwargs,
+ ):
+ if "padding_mask" in kwargs:
+ warnings.warn(
+ "Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure use `attention_mask` instead.`"
+ )
+
+ # overwrite attention_mask with padding_mask
+ attention_mask = kwargs.pop("padding_mask")
+ bsz, q_len, _ = hidden_states.size()
+
+ query_states = self.q_proj(hidden_states)
+ key_states = self.k_proj(hidden_states)
+ value_states = self.v_proj(hidden_states)
+
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
+ key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
+ value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
+
+ kv_seq_len = key_states.shape[-2]
+ if past_key_value is not None:
+ if self.layer_idx is None:
+ raise ValueError(
+ f"The cache structure has changed since version v4.36. If you are using {self.__class__.__name__} "
+ "for auto-regressive decoding with k/v caching, please make sure to initialize the attention class "
+ "with a layer index."
+ )
+ kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
+
+ # Because the input can be padded, the absolute sequence length depends on the max position id.
+ rotary_seq_len = max(kv_seq_len, position_ids[:, -1].max().item()) + 1
+ cos, sin = self.rotary_emb(value_states, seq_len=rotary_seq_len)
+
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
+
+ use_sliding_windows = (
+ _flash_supports_window_size
+ and getattr(self.config, "sliding_window", None) is not None
+ and kv_seq_len > self.config.sliding_window
+ and self.config.use_sliding_window
+ )
+
+ if not _flash_supports_window_size:
+ logger.warning_once(
+ "The current flash attention version does not support sliding window attention, for a more memory efficient implementation"
+ " make sure to upgrade flash-attn library."
+ )
+
+ if past_key_value is not None:
+ # Activate cache slicing only if the config has a `sliding_window` attribute
+ cache_has_contents = past_key_value.get_seq_length(self.layer_idx) > 0
+ if (
+ getattr(self.config, "sliding_window", None) is not None
+ and kv_seq_len > self.config.sliding_window
+ and cache_has_contents
+ ):
+ slicing_tokens = 1 - self.config.sliding_window
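+ # `slicing_tokens` is negative, so the slices below keep only the last `sliding_window - 1`
+ # cached positions; together with the incoming token the cache never exceeds the window.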
+
+ past_key = past_key_value[self.layer_idx][0]
+ past_value = past_key_value[self.layer_idx][1]
+
+ past_key = past_key[:, :, slicing_tokens:, :].contiguous()
+ past_value = past_value[:, :, slicing_tokens:, :].contiguous()
+
+ if past_key.shape[-2] != self.config.sliding_window - 1:
+ raise ValueError(
+ f"past key must have a shape of (`batch_size, num_heads, self.config.sliding_window-1, head_dim`), got"
+ f" {past_key.shape}"
+ )
+
+ if attention_mask is not None:
+ attention_mask = attention_mask[:, slicing_tokens:]
+ attention_mask = torch.cat([attention_mask, torch.ones_like(attention_mask[:, -1:])], dim=-1)
+
+ cache_kwargs = {"sin": sin, "cos": cos} # Specific to RoPE models
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
+
+ # repeat k/v heads if n_kv_heads < n_heads
+ key_states = repeat_kv(key_states, self.num_key_value_groups)
+ value_states = repeat_kv(value_states, self.num_key_value_groups)
+ dropout_rate = 0.0 if not self.training else self.attention_dropout
+
+ # In PEFT, the layer norms are usually cast to float32 for training stability reasons,
+ # so the input hidden states may get silently cast to float32. Hence, we need to
+ # cast them back to the expected half-precision dtype to make sure everything works as expected.
+ input_dtype = query_states.dtype
+ if input_dtype == torch.float32:
+ if torch.is_autocast_enabled():
+ target_dtype = torch.get_autocast_gpu_dtype()
+ # Handle the case where the model is quantized
+ elif hasattr(self.config, "_pre_quantization_dtype"):
+ target_dtype = self.config._pre_quantization_dtype
+ else:
+ target_dtype = self.q_proj.weight.dtype
+
+ logger.warning_once(
+ f"The input hidden states seems to be silently casted in float32, this might be related to"
+ f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in"
+ f" {target_dtype}."
+ )
+
+ query_states = query_states.to(target_dtype)
+ key_states = key_states.to(target_dtype)
+ value_states = value_states.to(target_dtype)
+
+ # Reshape to the expected shape for Flash Attention
+ query_states = query_states.transpose(1, 2)
+ key_states = key_states.transpose(1, 2)
+ value_states = value_states.transpose(1, 2)
+
+ attn_output = self._flash_attention_forward(
+ query_states,
+ key_states,
+ value_states,
+ attention_mask,
+ q_len,
+ dropout=dropout_rate,
+ use_sliding_windows=use_sliding_windows,
+ )
+
+ attn_output = attn_output.reshape(bsz, q_len, self.hidden_size).contiguous()
+ attn_output = self.o_proj(attn_output)
+
+ if not output_attentions:
+ attn_weights = None
+
+ return attn_output, attn_weights, past_key_value
+
+ def _flash_attention_forward(
+ self,
+ query_states,
+ key_states,
+ value_states,
+ attention_mask,
+ query_length,
+ dropout=0.0,
+ softmax_scale=None,
+ use_sliding_windows=False,
+ ):
+ """
+ Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token,
+ the input is first unpadded, the attention scores are then computed, and the output is finally padded back.
+
+ Args:
+ query_states (`torch.Tensor`):
+ Input query states to be passed to Flash Attention API
+ key_states (`torch.Tensor`):
+ Input key states to be passed to Flash Attention API
+ value_states (`torch.Tensor`):
+ Input value states to be passed to Flash Attention API
+ attention_mask (`torch.Tensor`):
+ The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the
+ position of padding tokens and 1 for the position of non-padding tokens.
+ dropout (`float`):
+ Attention dropout
+ softmax_scale (`float`, *optional*):
+ The scaling of QK^T before applying softmax. Defaults to 1 / sqrt(head_dim).
+ use_sliding_windows (`bool`, *optional*):
+ Whether to activate sliding window attention.
+ """
+ if not self._flash_attn_uses_top_left_mask:
+ causal = self.is_causal
+ else:
+ # TODO: Remove the `query_length != 1` check once Flash Attention for RoCm is bumped to 2.1. For details, please see the comment in LlamaFlashAttention2 __init__.
+ causal = self.is_causal and query_length != 1
+
+ # Decide whether to use SWA or not by layer index.
+ if use_sliding_windows and self.layer_idx >= self.config.max_window_layers:
+ use_sliding_windows = False
+
+ # Contains at least one padding token in the sequence
+ if attention_mask is not None:
+ batch_size = query_states.shape[0]
+ query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input(
+ query_states, key_states, value_states, attention_mask, query_length
+ )
+
+ cu_seqlens_q, cu_seqlens_k = cu_seq_lens
+ max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens
+
+ if not use_sliding_windows:
+ attn_output_unpad = flash_attn_varlen_func(
+ query_states,
+ key_states,
+ value_states,
+ cu_seqlens_q=cu_seqlens_q,
+ cu_seqlens_k=cu_seqlens_k,
+ max_seqlen_q=max_seqlen_in_batch_q,
+ max_seqlen_k=max_seqlen_in_batch_k,
+ dropout_p=dropout,
+ softmax_scale=softmax_scale,
+ causal=causal,
+ )
+ else:
+ attn_output_unpad = flash_attn_varlen_func(
+ query_states,
+ key_states,
+ value_states,
+ cu_seqlens_q=cu_seqlens_q,
+ cu_seqlens_k=cu_seqlens_k,
+ max_seqlen_q=max_seqlen_in_batch_q,
+ max_seqlen_k=max_seqlen_in_batch_k,
+ dropout_p=dropout,
+ softmax_scale=softmax_scale,
+ causal=causal,
+ window_size=(self.config.sliding_window, self.config.sliding_window),
+ )
+
+ attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length)
+ else:
+ if not use_sliding_windows:
+ attn_output = flash_attn_func(
+ query_states,
+ key_states,
+ value_states,
+ dropout,
+ softmax_scale=softmax_scale,
+ causal=causal,
+ )
+ else:
+ attn_output = flash_attn_func(
+ query_states,
+ key_states,
+ value_states,
+ dropout,
+ softmax_scale=softmax_scale,
+ causal=causal,
+ window_size=(self.config.sliding_window, self.config.sliding_window),
+ )
+
+ return attn_output
+
+ # Copied from transformers.models.mistral.modeling_mistral.MistralFlashAttention2._upad_input
+ def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length):
+ batch_size, kv_seq_len, num_heads, head_dim = key_layer.shape
+
+ # On the first iteration we need to properly re-create the padding mask
+ # by slicing it on the proper place
+ if kv_seq_len != attention_mask.shape[-1]:
+ attention_mask_num_tokens = attention_mask.shape[-1]
+ attention_mask = attention_mask[:, attention_mask_num_tokens - kv_seq_len :]
+
+ indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask)
+
+ key_layer = index_first_axis(key_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k)
+ value_layer = index_first_axis(value_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k)
+
+ if query_length == kv_seq_len:
+ query_layer = index_first_axis(
+ query_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k
+ )
+ cu_seqlens_q = cu_seqlens_k
+ max_seqlen_in_batch_q = max_seqlen_in_batch_k
+ indices_q = indices_k
+ elif query_length == 1:
+ max_seqlen_in_batch_q = 1
+ cu_seqlens_q = torch.arange(
+ batch_size + 1, dtype=torch.int32, device=query_layer.device
+ ) # There is a memcpy here, that is very bad.
+ indices_q = cu_seqlens_q[:-1]
+ query_layer = query_layer.squeeze(1)
+ else:
+ # The -q_len: slice assumes left padding.
+ attention_mask = attention_mask[:, -query_length:]
+ query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask)
+
+ return (
+ query_layer,
+ key_layer,
+ value_layer,
+ indices_q,
+ (cu_seqlens_q, cu_seqlens_k),
+ (max_seqlen_in_batch_q, max_seqlen_in_batch_k),
+ )
+
+
+# Copied from transformers.models.mistral.modeling_mistral.MistralSdpaAttention with Mistral->Qwen2Moe
+class Qwen2MoeSdpaAttention(Qwen2MoeAttention):
+ """
+ Qwen2Moe attention module using torch.nn.functional.scaled_dot_product_attention. This module inherits from
+ `Qwen2MoeAttention` as the weights of the module stay untouched. The only changes are in the forward pass, to adapt to
+ the SDPA API.
+ """
+
+ # Adapted from Qwen2MoeAttention.forward
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_value: Optional[Cache] = None,
+ output_attentions: bool = False,
+ use_cache: bool = False,
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
+ if output_attentions:
+ # TODO: Improve this warning with e.g. `model.config.attn_implementation = "manual"` once this is implemented.
+ logger.warning_once(
+ "Qwen2MoeModel is using Qwen2MoeSdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to the manual attention implementation, "
+ 'but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
+ )
+ return super().forward(
+ hidden_states=hidden_states,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ past_key_value=past_key_value,
+ output_attentions=output_attentions,
+ use_cache=use_cache,
+ )
+
+ bsz, q_len, _ = hidden_states.size()
+
+ query_states = self.q_proj(hidden_states)
+ key_states = self.k_proj(hidden_states)
+ value_states = self.v_proj(hidden_states)
+
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
+ key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
+ value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
+
+ kv_seq_len = key_states.shape[-2]
+ if past_key_value is not None:
+ kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
+ cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
+
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
+
+ if past_key_value is not None:
+ cache_kwargs = {"sin": sin, "cos": cos} # Specific to RoPE models
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
+
+ key_states = repeat_kv(key_states, self.num_key_value_groups)
+ value_states = repeat_kv(value_states, self.num_key_value_groups)
+
+ if attention_mask is not None:
+ if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
+ raise ValueError(
+ f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}"
+ )
+
+ # SDPA with memory-efficient backend is currently (torch==2.1.2) bugged with non-contiguous inputs with custom attn_mask,
+ # Reference: https://github.com/pytorch/pytorch/issues/112577.
+ if query_states.device.type == "cuda" and attention_mask is not None:
+ query_states = query_states.contiguous()
+ key_states = key_states.contiguous()
+ value_states = value_states.contiguous()
+
+ attn_output = torch.nn.functional.scaled_dot_product_attention(
+ query_states,
+ key_states,
+ value_states,
+ attn_mask=attention_mask,
+ dropout_p=self.attention_dropout if self.training else 0.0,
+ # The q_len > 1 is necessary to match with AttentionMaskConverter.to_causal_4d that does not create a causal mask in case q_len == 1.
+ is_causal=self.is_causal and attention_mask is None and q_len > 1,
+ )
+
+ attn_output = attn_output.transpose(1, 2).contiguous()
+ attn_output = attn_output.view(bsz, q_len, self.hidden_size)
+
+ attn_output = self.o_proj(attn_output)
+
+ return attn_output, None, past_key_value
+
+
+QWEN2MOE_ATTENTION_CLASSES = {
+ "eager": Qwen2MoeAttention,
+ "flash_attention_2": Qwen2MoeFlashAttention2,
+ "sdpa": Qwen2MoeSdpaAttention,
+}
+
+
+class Qwen2MoeSparseMoeBlock(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.num_experts = config.num_experts
+ self.top_k = config.num_experts_per_tok
+ self.norm_topk_prob = config.norm_topk_prob
+
+ # gating
+ self.gate = nn.Linear(config.hidden_size, config.num_experts, bias=False)
+ self.experts = nn.ModuleList(
+ [Qwen2MoeMLP(config, intermediate_size=config.moe_intermediate_size) for _ in range(self.num_experts)]
+ )
+
+ self.shared_expert = Qwen2MoeMLP(config, intermediate_size=config.shared_expert_intermediate_size)
+ self.shared_expert_gate = torch.nn.Linear(config.hidden_size, 1, bias=False)
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ """ """
+ batch_size, sequence_length, hidden_dim = hidden_states.shape
+ hidden_states = hidden_states.view(-1, hidden_dim)
+ # router_logits: (batch * sequence_length, n_experts)
+ router_logits = self.gate(hidden_states)
+
+ routing_weights = F.softmax(router_logits, dim=1, dtype=torch.float)
+ routing_weights, selected_experts = torch.topk(routing_weights, self.top_k, dim=-1)
+ if self.norm_topk_prob:
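+ # Renormalize so the selected top-k routing weights sum to 1 for each token.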
+ routing_weights /= routing_weights.sum(dim=-1, keepdim=True)
+ # we cast back to the input dtype
+ routing_weights = routing_weights.to(hidden_states.dtype)
+
+ final_hidden_states = torch.zeros(
+ (batch_size * sequence_length, hidden_dim), dtype=hidden_states.dtype, device=hidden_states.device
+ )
+
+ # One-hot encode the selected experts to create an expert mask;
+ # this will be used to easily index which expert is going to be solicited
+ expert_mask = torch.nn.functional.one_hot(selected_experts, num_classes=self.num_experts).permute(2, 1, 0)
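+ # expert_mask has shape (num_experts, top_k, batch * seq_len); expert_mask[e] marks which
+ # (top-k rank, token) pairs were routed to expert e.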
+
+ # Loop over all available experts in the model and perform the computation on each expert
+ for expert_idx in range(self.num_experts):
+ expert_layer = self.experts[expert_idx]
+ idx, top_x = torch.where(expert_mask[expert_idx])
+
+ # Index the correct hidden states and compute the expert hidden state for
+ # the current expert. We need to make sure to multiply the output hidden
+ # states by `routing_weights` on the corresponding tokens (top-1 and top-2)
+ current_state = hidden_states[None, top_x].reshape(-1, hidden_dim)
+ current_hidden_states = expert_layer(current_state) * routing_weights[top_x, idx, None]
+
+ # However `index_add_` only supports torch tensors for indexing, so we use
+ # the `top_x` tensor here.
+ final_hidden_states.index_add_(0, top_x, current_hidden_states.to(hidden_states.dtype))
+
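+ # The shared expert runs on every token; its output is scaled by a per-token sigmoid gate
+ # below before being added to the routed-expert output.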
+ shared_expert_output = self.shared_expert(hidden_states)
+ shared_expert_output = F.sigmoid(self.shared_expert_gate(hidden_states)) * shared_expert_output
+
+ final_hidden_states = final_hidden_states + shared_expert_output
+
+ final_hidden_states = final_hidden_states.reshape(batch_size, sequence_length, hidden_dim)
+ return final_hidden_states, router_logits
+
+
+class Qwen2MoeDecoderLayer(nn.Module):
+ def __init__(self, config: Qwen2MoeConfig, layer_idx: int):
+ super().__init__()
+ self.hidden_size = config.hidden_size
+
+ self.self_attn = QWEN2MOE_ATTENTION_CLASSES[config._attn_implementation](config, layer_idx)
+
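+ # Layers whose 1-based index is a multiple of `decoder_sparse_step` use a sparse MoE block
+ # (provided `num_experts > 0`); all other layers use a regular dense MLP.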
+ if config.num_experts > 0 and (layer_idx + 1) % config.decoder_sparse_step == 0:
+ self.mlp = Qwen2MoeSparseMoeBlock(config)
+ else:
+ self.mlp = Qwen2MoeMLP(config, intermediate_size=config.intermediate_size)
+
+ self.input_layernorm = Qwen2MoeRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
+ self.post_attention_layernorm = Qwen2MoeRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
+ output_attentions: Optional[bool] = False,
+ output_router_logits: Optional[bool] = False,
+ use_cache: Optional[bool] = False,
+ **kwargs,
+ ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
+ if "padding_mask" in kwargs:
+ warnings.warn(
+ "Passing `padding_mask` is deprecated and will be removed in v4.37. "
+ "Please make sure use `attention_mask` instead.`"
+ )
+ """
+ Args:
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
+ attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
+ `(batch, sequence_length)` where padding elements are indicated by 0.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ output_router_logits (`bool`, *optional*):
+ Whether or not to return the logits of all the routers. They are useful for computing the router loss,
+ and should not be returned during inference.
+ use_cache (`bool`, *optional*):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
+ (see `past_key_values`).
+ past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
+ """
+
+ residual = hidden_states
+
+ hidden_states = self.input_layernorm(hidden_states)
+
+ # Self Attention
+ hidden_states, self_attn_weights, present_key_value = self.self_attn(
+ hidden_states=hidden_states,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ past_key_value=past_key_value,
+ output_attentions=output_attentions,
+ use_cache=use_cache,
+ )
+ hidden_states = residual + hidden_states
+
+ # Fully Connected
+ residual = hidden_states
+ hidden_states = self.post_attention_layernorm(hidden_states)
+
+ hidden_states = self.mlp(hidden_states)
+ if isinstance(hidden_states, tuple):
+ hidden_states, router_logits = hidden_states
+ else:
+ router_logits = None
+
+ hidden_states = residual + hidden_states
+
+ outputs = (hidden_states,)
+
+ if output_attentions:
+ outputs += (self_attn_weights,)
+
+ if use_cache:
+ outputs += (present_key_value,)
+
+ if output_router_logits:
+ outputs += (router_logits,)
+
+ return outputs
+
+
+QWEN2MOE_START_DOCSTRING = r"""
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads,
+ etc.).
+
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
+ and behavior.
+
+ Parameters:
+ config ([`Qwen2MoeConfig`]):
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
+ load the weights associated with the model, only the configuration. Check out the
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+
+@add_start_docstrings(
+ "The bare Qwen2MoE Model outputting raw hidden-states without any specific head on top.",
+ QWEN2MOE_START_DOCSTRING,
+)
+class Qwen2MoePreTrainedModel(PreTrainedModel):
+ config_class = Qwen2MoeConfig
+ base_model_prefix = "model"
+ supports_gradient_checkpointing = True
+ _no_split_modules = ["Qwen2MoeDecoderLayer"]
+ _skip_keys_device_placement = "past_key_values"
+ _supports_flash_attn_2 = True
+ _supports_sdpa = True
+ _supports_cache_class = True
+
+ def _init_weights(self, module):
+ std = self.config.initializer_range
+ if isinstance(module, nn.Linear):
+ module.weight.data.normal_(mean=0.0, std=std)
+ if module.bias is not None:
+ module.bias.data.zero_()
+ elif isinstance(module, nn.Embedding):
+ module.weight.data.normal_(mean=0.0, std=std)
+ if module.padding_idx is not None:
+ module.weight.data[module.padding_idx].zero_()
+
+
+QWEN2MOE_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
+ it.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
+ `past_key_values`).
+
+ If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
+ and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
+ information on the default strategy.
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
+ config.n_positions - 1]`.
+
+ [What are position IDs?](../glossary#position-ids)
+ past_key_values (`Cache` or `tuple(tuple(torch.FloatTensor))`, *optional*):
+ Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
+ blocks) that can be used to speed up sequential decoding. This typically consists of the `past_key_values`
+ returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`.
+
+ Two formats are allowed:
+ - a [`~cache_utils.Cache`] instance;
+ - Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
+ shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`). This is also known as the legacy
+ cache format.
+
+ The model will output the same cache format that is fed as input. If no `past_key_values` are passed, the
+ legacy cache format will be returned.
+
+ If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't
+ have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids`
+ of shape `(batch_size, sequence_length)`.
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
+ model's internal embedding lookup matrix.
+ use_cache (`bool`, *optional*):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
+ `past_key_values`).
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ output_router_logits (`bool`, *optional*):
+ Whether or not to return the logits of all the routers. They are useful for computing the router loss, and
+ should not be returned during inference.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+@add_start_docstrings(
+ "The bare Qwen2MoE Model outputting raw hidden-states without any specific head on top.",
+ QWEN2MOE_START_DOCSTRING,
+)
+class Qwen2MoeModel(Qwen2MoePreTrainedModel):
+ """
+ Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`Qwen2MoeDecoderLayer`]
+
+ Args:
+ config: Qwen2MoeConfig
+ """
+
+ def __init__(self, config: Qwen2MoeConfig):
+ super().__init__(config)
+ self.padding_idx = config.pad_token_id
+ self.vocab_size = config.vocab_size
+
+ self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
+ self.layers = nn.ModuleList(
+ [Qwen2MoeDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
+ )
+ self._attn_implementation = config._attn_implementation
+ self.norm = Qwen2MoeRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
+
+ self.gradient_checkpointing = False
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.embed_tokens
+
+ def set_input_embeddings(self, value):
+ self.embed_tokens = value
+
+ @add_start_docstrings_to_model_forward(QWEN2MOE_INPUTS_DOCSTRING)
+ def forward(
+ self,
+ input_ids: torch.LongTensor = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ output_router_logits: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, MoeModelOutputWithPast]:
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_router_logits = (
+ output_router_logits if output_router_logits is not None else self.config.output_router_logits
+ )
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ # retrieve input_ids and inputs_embeds
+ if input_ids is not None and inputs_embeds is not None:
+ raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
+ elif input_ids is not None:
+ batch_size, seq_length = input_ids.shape
+ elif inputs_embeds is not None:
+ batch_size, seq_length, _ = inputs_embeds.shape
+ else:
+ raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
+
+ if self.gradient_checkpointing and self.training:
+ if use_cache:
+ logger.warning_once(
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
+ )
+ use_cache = False
+
+ past_key_values_length = 0
+
+ if use_cache:
+ use_legacy_cache = not isinstance(past_key_values, Cache)
+ if use_legacy_cache:
+ past_key_values = DynamicCache.from_legacy_cache(past_key_values)
+ past_key_values_length = past_key_values.get_usable_length(seq_length)
+
+ if position_ids is None:
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
+ position_ids = torch.arange(
+ past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device
+ )
+ position_ids = position_ids.unsqueeze(0).view(-1, seq_length)
+ else:
+ position_ids = position_ids.view(-1, seq_length).long()
+
+ if inputs_embeds is None:
+ inputs_embeds = self.embed_tokens(input_ids)
+
+ if attention_mask is not None and self._attn_implementation == "flash_attention_2" and use_cache:
+ is_padding_right = attention_mask[:, -1].sum().item() != batch_size
+ if is_padding_right:
+ raise ValueError(
+ "You are attempting to perform batched generation with padding_side='right'"
+ " this may lead to unexpected behaviour for Flash Attention version of Qwen2MoE. Make sure to "
+ " call `tokenizer.padding_side = 'left'` before tokenizing the input. "
+ )
+
+ if self._attn_implementation == "flash_attention_2":
+ # 2d mask is passed through the layers
+ attention_mask = attention_mask if (attention_mask is not None and 0 in attention_mask) else None
+ elif self._attn_implementation == "sdpa" and not output_attentions:
+ # output_attentions=True cannot be supported when using SDPA, so we fall back on
+ # the manual implementation, which requires a 4D causal mask in all cases.
+ attention_mask = _prepare_4d_causal_attention_mask_for_sdpa(
+ attention_mask,
+ (batch_size, seq_length),
+ inputs_embeds,
+ past_key_values_length,
+ sliding_window=self.config.sliding_window,
+ )
+ else:
+ # 4d mask is passed through the layers
+ attention_mask = _prepare_4d_causal_attention_mask(
+ attention_mask,
+ (batch_size, seq_length),
+ inputs_embeds,
+ past_key_values_length,
+ sliding_window=self.config.sliding_window,
+ )
+
+ hidden_states = inputs_embeds
+
+ # decoder layers
+ all_hidden_states = () if output_hidden_states else None
+ all_self_attns = () if output_attentions else None
+ all_router_logits = () if output_router_logits else None
+ next_decoder_cache = None
+
+ for decoder_layer in self.layers:
+ if output_hidden_states:
+ all_hidden_states += (hidden_states,)
+
+ if self.gradient_checkpointing and self.training:
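+ # Gradient checkpointing: re-run this decoder layer in the backward pass instead of storing
+ # its activations, trading extra compute for lower memory use.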
+ layer_outputs = self._gradient_checkpointing_func(
+ decoder_layer.__call__,
+ hidden_states,
+ attention_mask,
+ position_ids,
+ past_key_values,
+ output_attentions,
+ output_router_logits,
+ use_cache,
+ )
+ else:
+ layer_outputs = decoder_layer(
+ hidden_states,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ past_key_value=past_key_values,
+ output_attentions=output_attentions,
+ output_router_logits=output_router_logits,
+ use_cache=use_cache,
+ )
+
+ hidden_states = layer_outputs[0]
+
+ if use_cache:
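+ # The present key/value cache sits at index 2 of `layer_outputs` when attention weights
+ # are also returned, otherwise at index 1.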
+ next_decoder_cache = layer_outputs[2 if output_attentions else 1]
+
+ if output_attentions:
+ all_self_attns += (layer_outputs[1],)
+
+ if output_router_logits and layer_outputs[-1] is not None:
+ all_router_logits += (layer_outputs[-1],)
+
+ hidden_states = self.norm(hidden_states)
+
+ # add hidden states from the last decoder layer
+ if output_hidden_states:
+ all_hidden_states += (hidden_states,)
+
+ next_cache = None
+ if use_cache:
+ next_cache = next_decoder_cache.to_legacy_cache() if use_legacy_cache else next_decoder_cache
+
+ if not return_dict:
+ return tuple(
+ v
+ for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_router_logits]
+ if v is not None
+ )
+ return MoeModelOutputWithPast(
+ last_hidden_state=hidden_states,
+ past_key_values=next_cache,
+ hidden_states=all_hidden_states,
+ attentions=all_self_attns,
+ router_logits=all_router_logits,
+ )
+
+
+class Qwen2MoeForCausalLM(Qwen2MoePreTrainedModel):
+ _tied_weights_keys = ["lm_head.weight"]
+
+ def __init__(self, config):
+ super().__init__(config)
+ self.model = Qwen2MoeModel(config)
+ self.vocab_size = config.vocab_size
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
+
+ self.router_aux_loss_coef = config.router_aux_loss_coef
+ self.num_experts = config.num_experts
+ self.num_experts_per_tok = config.num_experts_per_tok
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.model.embed_tokens
+
+ def set_input_embeddings(self, value):
+ self.model.embed_tokens = value
+
+ def get_output_embeddings(self):
+ return self.lm_head
+
+ def set_output_embeddings(self, new_embeddings):
+ self.lm_head = new_embeddings
+
+ def set_decoder(self, decoder):
+ self.model = decoder
+
+ def get_decoder(self):
+ return self.model
+
+ @add_start_docstrings_to_model_forward(QWEN2MOE_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=MoeCausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_ids: torch.LongTensor = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ output_router_logits: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, MoeCausalLMOutputWithPast]:
+ r"""
+ Args:
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
+
+ Returns:
+
+ Example:
+
+ ```python
+ >>> from transformers import AutoTokenizer, Qwen2MoeForCausalLM
+
+ >>> model = Qwen2MoeForCausalLM.from_pretrained(PATH_TO_CONVERTED_WEIGHTS)
+ >>> tokenizer = AutoTokenizer.from_pretrained(PATH_TO_CONVERTED_TOKENIZER)
+
+ >>> prompt = "Hey, are you conscious? Can you talk to me?"
+ >>> inputs = tokenizer(prompt, return_tensors="pt")
+
+ >>> # Generate
+ >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
+ >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
+ "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
+ ```"""
+
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_router_logits = (
+ output_router_logits if output_router_logits is not None else self.config.output_router_logits
+ )
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
+ outputs = self.model(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ past_key_values=past_key_values,
+ inputs_embeds=inputs_embeds,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ output_router_logits=output_router_logits,
+ return_dict=return_dict,
+ )
+
+ hidden_states = outputs[0]
+ logits = self.lm_head(hidden_states)
+ logits = logits.float()
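+ # Logits are upcast to float32 so the cross-entropy loss below is computed in full precision.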
+
+ loss = None
+ if labels is not None:
+ # Shift so that tokens < n predict n
+ shift_logits = logits[..., :-1, :].contiguous()
+ shift_labels = labels[..., 1:].contiguous()
+ # Flatten the tokens
+ loss_fct = CrossEntropyLoss()
+ shift_logits = shift_logits.view(-1, self.config.vocab_size)
+ shift_labels = shift_labels.view(-1)
+ # Enable model parallelism
+ shift_labels = shift_labels.to(shift_logits.device)
+ loss = loss_fct(shift_logits, shift_labels)
+
+ aux_loss = None
+ if output_router_logits:
+ aux_loss = load_balancing_loss_func(
+ outputs.router_logits if return_dict else outputs[-1],
+ self.num_experts,
+ self.num_experts_per_tok,
+ attention_mask,
+ )
+ if labels is not None:
+ loss += self.router_aux_loss_coef * aux_loss.to(loss.device) # make sure to reside in the same device
+
+ if not return_dict:
+ output = (logits,) + outputs[1:]
+ if output_router_logits:
+ output = (aux_loss,) + output
+ return (loss,) + output if loss is not None else output
+
+ return MoeCausalLMOutputWithPast(
+ loss=loss,
+ aux_loss=aux_loss,
+ logits=logits,
+ past_key_values=outputs.past_key_values,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ router_logits=outputs.router_logits,
+ )
+
+ def prepare_inputs_for_generation(
+ self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs
+ ):
+ # Omit tokens covered by past_key_values
+ if past_key_values is not None:
+ if isinstance(past_key_values, Cache):
+ cache_length = past_key_values.get_seq_length()
+ past_length = past_key_values.seen_tokens
+ max_cache_length = past_key_values.get_max_length()
+ else:
+ cache_length = past_length = past_key_values[0][0].shape[2]
+ max_cache_length = None
+
+ # Keep only the unprocessed tokens:
+ # 1 - If the length of the attention_mask exceeds the length of input_ids, then we are in a setting where
+ # some of the inputs are exclusively passed as part of the cache (e.g. when passing inputs_embeds as
+ # input)
+ if attention_mask is not None and attention_mask.shape[1] > input_ids.shape[1]:
+ input_ids = input_ids[:, -(attention_mask.shape[1] - past_length) :]
+ # 2 - If the past_length is smaller than input_ids', then input_ids holds all input tokens. We can discard
+ # input_ids based on the past_length.
+ elif past_length < input_ids.shape[1]:
+ input_ids = input_ids[:, past_length:]
+ # 3 - Otherwise (past_length >= input_ids.shape[1]), let's assume input_ids only has unprocessed tokens.
+
+ # If we are about to go beyond the maximum cache length, we need to crop the input attention mask.
+ if (
+ max_cache_length is not None
+ and attention_mask is not None
+ and cache_length + input_ids.shape[1] > max_cache_length
+ ):
+ attention_mask = attention_mask[:, -max_cache_length:]
+
+ position_ids = kwargs.get("position_ids", None)
+ if attention_mask is not None and position_ids is None:
+ # create position_ids on the fly for batch generation
+ position_ids = attention_mask.long().cumsum(-1) - 1
+ position_ids.masked_fill_(attention_mask == 0, 1)
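+ # Padding positions get a placeholder position id of 1; they are masked out in attention,
+ # so the exact value is inconsequential.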
+ if past_key_values:
+ position_ids = position_ids[:, -input_ids.shape[1] :]
+
+ # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
+ if inputs_embeds is not None and past_key_values is None:
+ model_inputs = {"inputs_embeds": inputs_embeds}
+ else:
+ model_inputs = {"input_ids": input_ids}
+
+ model_inputs.update(
+ {
+ "position_ids": position_ids,
+ "past_key_values": past_key_values,
+ "use_cache": kwargs.get("use_cache"),
+ "attention_mask": attention_mask,
+ }
+ )
+ return model_inputs
+
+ @staticmethod
+ def _reorder_cache(past_key_values, beam_idx):
+ reordered_past = ()
+ for layer_past in past_key_values:
+ reordered_past += (
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
+ )
+ return reordered_past
+
+
+@add_start_docstrings(
+ """
+ The Qwen2MoE Model transformer with a sequence classification head on top (linear layer).
+
+ [`Qwen2MoeForSequenceClassification`] uses the last token in order to do the classification, as other causal models
+ (e.g. GPT-2) do.
+
+ Since it does classification on the last token, it requires to know the position of the last token. If a
+ `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
+ no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
+ padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
+ each row of the batch).
+ """,
+ QWEN2MOE_START_DOCSTRING,
+)
+# Copied from transformers.models.llama.modeling_llama.LlamaForSequenceClassification with Llama->Qwen2Moe, LLAMA->QWEN2MOE
+class Qwen2MoeForSequenceClassification(Qwen2MoePreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+ self.num_labels = config.num_labels
+ self.model = Qwen2MoeModel(config)
+ self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.model.embed_tokens
+
+ def set_input_embeddings(self, value):
+ self.model.embed_tokens = value
+
+ @add_start_docstrings_to_model_forward(QWEN2MOE_INPUTS_DOCSTRING)
+ def forward(
+ self,
+ input_ids: torch.LongTensor = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, SequenceClassifierOutputWithPast]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ transformer_outputs = self.model(
+ input_ids,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ past_key_values=past_key_values,
+ inputs_embeds=inputs_embeds,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ hidden_states = transformer_outputs[0]
+ logits = self.score(hidden_states)
+
+ if input_ids is not None:
+ batch_size = input_ids.shape[0]
+ else:
+ batch_size = inputs_embeds.shape[0]
+
+ if self.config.pad_token_id is None and batch_size != 1:
+ raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
+ if self.config.pad_token_id is None:
+ sequence_lengths = -1
+ else:
+ if input_ids is not None:
+ # if no pad token found, use modulo instead of reverse indexing for ONNX compatibility
+ sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1
+ sequence_lengths = sequence_lengths % input_ids.shape[-1]
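+ # When a row contains no pad token, `argmax` returns 0, so the `- 1` wraps via the modulo
+ # above to the last position in that row.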
+ sequence_lengths = sequence_lengths.to(logits.device)
+ else:
+ sequence_lengths = -1
+
+ pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths]
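+ # Pool by gathering, for each sequence in the batch, the logits at its last non-padding token.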
+
+ loss = None
+ if labels is not None:
+ labels = labels.to(logits.device)
+ if self.config.problem_type is None:
+ if self.num_labels == 1:
+ self.config.problem_type = "regression"
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
+ self.config.problem_type = "single_label_classification"
+ else:
+ self.config.problem_type = "multi_label_classification"
+
+ if self.config.problem_type == "regression":
+ loss_fct = MSELoss()
+ if self.num_labels == 1:
+ loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
+ else:
+ loss = loss_fct(pooled_logits, labels)
+ elif self.config.problem_type == "single_label_classification":
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
+ elif self.config.problem_type == "multi_label_classification":
+ loss_fct = BCEWithLogitsLoss()
+ loss = loss_fct(pooled_logits, labels)
+ if not return_dict:
+ output = (pooled_logits,) + transformer_outputs[1:]
+ return ((loss,) + output) if loss is not None else output
+
+ return SequenceClassifierOutputWithPast(
+ loss=loss,
+ logits=pooled_logits,
+ past_key_values=transformer_outputs.past_key_values,
+ hidden_states=transformer_outputs.hidden_states,
+ attentions=transformer_outputs.attentions,
+ )
diff --git a/venv/lib/python3.10/site-packages/transformers/models/speecht5/__init__.py b/venv/lib/python3.10/site-packages/transformers/models/speecht5/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..20606dda51ef8746448a7561baf60555c0192321
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/speecht5/__init__.py
@@ -0,0 +1,96 @@
+# Copyright 2023 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+from ...utils import (
+ OptionalDependencyNotAvailable,
+ _LazyModule,
+ is_sentencepiece_available,
+ is_torch_available,
+)
+
+
+_import_structure = {
+ "configuration_speecht5": [
+ "SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP",
+ "SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP",
+ "SpeechT5Config",
+ "SpeechT5HifiGanConfig",
+ ],
+ "feature_extraction_speecht5": ["SpeechT5FeatureExtractor"],
+ "processing_speecht5": ["SpeechT5Processor"],
+}
+
+try:
+ if not is_sentencepiece_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["tokenization_speecht5"] = ["SpeechT5Tokenizer"]
+
+try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_speecht5"] = [
+ "SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "SpeechT5ForSpeechToText",
+ "SpeechT5ForSpeechToSpeech",
+ "SpeechT5ForTextToSpeech",
+ "SpeechT5Model",
+ "SpeechT5PreTrainedModel",
+ "SpeechT5HifiGan",
+ ]
+
+if TYPE_CHECKING:
+ from .configuration_speecht5 import (
+ SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
+ SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
+ SpeechT5Config,
+ SpeechT5HifiGanConfig,
+ )
+ from .feature_extraction_speecht5 import SpeechT5FeatureExtractor
+ from .processing_speecht5 import SpeechT5Processor
+
+ try:
+ if not is_sentencepiece_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .tokenization_speecht5 import SpeechT5Tokenizer
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_speecht5 import (
+ SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
+ SpeechT5ForSpeechToSpeech,
+ SpeechT5ForSpeechToText,
+ SpeechT5ForTextToSpeech,
+ SpeechT5HifiGan,
+ SpeechT5Model,
+ SpeechT5PreTrainedModel,
+ )
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
diff --git a/venv/lib/python3.10/site-packages/transformers/models/speecht5/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/speecht5/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0a25c0c1e1b9ca52470043bea20a95f02434644e
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/speecht5/__pycache__/__init__.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/speecht5/__pycache__/configuration_speecht5.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/speecht5/__pycache__/configuration_speecht5.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a5b209b0a7bd827c011a8795d18a1749b2a4da57
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/speecht5/__pycache__/configuration_speecht5.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/speecht5/__pycache__/convert_hifigan.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/speecht5/__pycache__/convert_hifigan.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a6817b499c086a33a5df580a3ac2e380321b3070
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/speecht5/__pycache__/convert_hifigan.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/speecht5/__pycache__/convert_speecht5_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/speecht5/__pycache__/convert_speecht5_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..49ce564808381e275a49e73098b2d60eb9a888ee
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/speecht5/__pycache__/convert_speecht5_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/speecht5/__pycache__/feature_extraction_speecht5.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/speecht5/__pycache__/feature_extraction_speecht5.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a44cd9dd6c5e42d3b167eab05bfdd722b888a8f8
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/speecht5/__pycache__/feature_extraction_speecht5.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/speecht5/__pycache__/modeling_speecht5.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/speecht5/__pycache__/modeling_speecht5.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f25bc43ffc8e6f3a4d6a22b1db39552d0516de7d
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/speecht5/__pycache__/modeling_speecht5.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/speecht5/__pycache__/number_normalizer.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/speecht5/__pycache__/number_normalizer.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..74cffd9b82f7d3eac72eb3af895657d486fe8875
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/speecht5/__pycache__/number_normalizer.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/speecht5/__pycache__/processing_speecht5.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/speecht5/__pycache__/processing_speecht5.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1c3de6586cc252284de850834fd7c2363e1c93f3
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/speecht5/__pycache__/processing_speecht5.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/speecht5/__pycache__/tokenization_speecht5.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/speecht5/__pycache__/tokenization_speecht5.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2746d755b581d2cfcfa56391030a6022291448be
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/speecht5/__pycache__/tokenization_speecht5.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/speecht5/configuration_speecht5.py b/venv/lib/python3.10/site-packages/transformers/models/speecht5/configuration_speecht5.py
new file mode 100644
index 0000000000000000000000000000000000000000..36cb4995a83f0519592a89174f0bd51ecb85deab
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/speecht5/configuration_speecht5.py
@@ -0,0 +1,427 @@
+# coding=utf-8
+# Copyright 2023 The Fairseq Authors, Microsoft Research, and the HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" SpeechT5 model configuration"""
+
+import functools
+import operator
+
+from ...configuration_utils import PretrainedConfig
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+
+from ..deprecated._archive_maps import SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
+
+
+SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP = {
+ "microsoft/speecht5_hifigan": "https://huggingface.co/microsoft/speecht5_hifigan/resolve/main/config.json",
+}
+
+
+class SpeechT5Config(PretrainedConfig):
+ r"""
+ This is the configuration class to store the configuration of a [`SpeechT5Model`]. It is used to instantiate a
+ SpeechT5 model according to the specified arguments, defining the model architecture. Instantiating a configuration
+ with the defaults will yield a similar configuration to that of the SpeechT5
+ [microsoft/speecht5_asr](https://huggingface.co/microsoft/speecht5_asr) architecture.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+ Args:
+ vocab_size (`int`, *optional*, defaults to 81):
+ Vocabulary size of the SpeechT5 model. Defines the number of different tokens that can be represented by
+            the `input_ids` passed to the forward method of [`SpeechT5Model`].
+ hidden_size (`int`, *optional*, defaults to 768):
+ Dimensionality of the encoder layers and the pooler layer.
+ encoder_layers (`int`, *optional*, defaults to 12):
+ Number of hidden layers in the Transformer encoder.
+ encoder_attention_heads (`int`, *optional*, defaults to 12):
+ Number of attention heads for each attention layer in the Transformer encoder.
+ encoder_ffn_dim (`int`, *optional*, defaults to 3072):
+ Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
+ encoder_layerdrop (`float`, *optional*, defaults to 0.1):
+            The LayerDrop probability for the encoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556)
+ for more details.
+ decoder_layers (`int`, *optional*, defaults to 6):
+ Number of hidden layers in the Transformer decoder.
+ decoder_attention_heads (`int`, *optional*, defaults to 12):
+ Number of attention heads for each attention layer in the Transformer decoder.
+ decoder_ffn_dim (`int`, *optional*, defaults to 3072):
+ Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer decoder.
+ decoder_layerdrop (`float`, *optional*, defaults to 0.1):
+            The LayerDrop probability for the decoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556)
+ for more details.
+ hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+ `"relu"`, `"selu"` and `"gelu_new"` are supported.
+ positional_dropout (`float`, *optional*, defaults to 0.1):
+ The dropout probability for the text position encoding layers.
+ hidden_dropout (`float`, *optional*, defaults to 0.1):
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+ attention_dropout (`float`, *optional*, defaults to 0.1):
+ The dropout ratio for the attention probabilities.
+ activation_dropout (`float`, *optional*, defaults to 0.1):
+ The dropout ratio for activations inside the fully connected layer.
+ initializer_range (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+ layer_norm_eps (`float`, *optional*, defaults to 1e-5):
+ The epsilon used by the layer normalization layers.
+ scale_embedding (`bool`, *optional*, defaults to `False`):
+            Scale embeddings by dividing by sqrt(d_model).
+ feat_extract_norm (`str`, *optional*, defaults to `"group"`):
+ The norm to be applied to 1D convolutional layers in the speech encoder pre-net. One of `"group"` for group
+ normalization of only the first 1D convolutional layer or `"layer"` for layer normalization of all 1D
+ convolutional layers.
+ feat_proj_dropout (`float`, *optional*, defaults to 0.0):
+ The dropout probability for output of the speech encoder pre-net.
+        feat_extract_activation (`str`, *optional*, defaults to `"gelu"`):
+ The non-linear activation function (function or string) in the 1D convolutional layers of the feature
+ extractor. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported.
+ conv_dim (`Tuple[int]` or `List[int]`, *optional*, defaults to `(512, 512, 512, 512, 512, 512, 512)`):
+ A tuple of integers defining the number of input and output channels of each 1D convolutional layer in the
+ speech encoder pre-net. The length of *conv_dim* defines the number of 1D convolutional layers.
+ conv_stride (`Tuple[int]` or `List[int]`, *optional*, defaults to `(5, 2, 2, 2, 2, 2, 2)`):
+ A tuple of integers defining the stride of each 1D convolutional layer in the speech encoder pre-net. The
+ length of *conv_stride* defines the number of convolutional layers and has to match the length of
+ *conv_dim*.
+        conv_kernel (`Tuple[int]` or `List[int]`, *optional*, defaults to `(10, 3, 3, 3, 3, 2, 2)`):
+ A tuple of integers defining the kernel size of each 1D convolutional layer in the speech encoder pre-net.
+ The length of *conv_kernel* defines the number of convolutional layers and has to match the length of
+ *conv_dim*.
+ conv_bias (`bool`, *optional*, defaults to `False`):
+ Whether the 1D convolutional layers have a bias.
+ num_conv_pos_embeddings (`int`, *optional*, defaults to 128):
+ Number of convolutional positional embeddings. Defines the kernel size of 1D convolutional positional
+ embeddings layer.
+ num_conv_pos_embedding_groups (`int`, *optional*, defaults to 16):
+ Number of groups of 1D convolutional positional embeddings layer.
+ apply_spec_augment (`bool`, *optional*, defaults to `True`):
+ Whether to apply *SpecAugment* data augmentation to the outputs of the speech encoder pre-net. For
+ reference see [SpecAugment: A Simple Data Augmentation Method for Automatic Speech
+ Recognition](https://arxiv.org/abs/1904.08779).
+        mask_time_prob (`float`, *optional*, defaults to 0.05):
+            Percentage (between 0 and 1) of all feature vectors along the time axis which will be masked. The masking
+            procedure generates `mask_time_prob*len(time_axis)/mask_time_length` independent masks over the axis. If
+            reasoning from the probability of each feature vector to be chosen as the start of the vector span to be
+            masked, *mask_time_prob* should be `prob_vector_start*mask_time_length`. Note that overlap may decrease the
+            actual percentage of masked vectors. This is only relevant if `apply_spec_augment is True`.
+ mask_time_length (`int`, *optional*, defaults to 10):
+ Length of vector span along the time axis.
+        mask_time_min_masks (`int`, *optional*, defaults to 2):
+            The minimum number of masks of length `mask_time_length` generated along the time axis, each time step,
+            irrespective of `mask_time_prob`. Only relevant if
+            `mask_time_prob*len(time_axis)/mask_time_length < mask_time_min_masks`.
+        mask_feature_prob (`float`, *optional*, defaults to 0.0):
+            Percentage (between 0 and 1) of all feature vectors along the feature axis which will be masked. The
+            masking procedure generates `mask_feature_prob*len(feature_axis)/mask_feature_length` independent masks
+            over the axis. If reasoning from the probability of each feature vector to be chosen as the start of the
+            vector span to be masked, *mask_feature_prob* should be `prob_vector_start*mask_feature_length`. Note that
+            overlap may decrease the actual percentage of masked vectors. This is only relevant if
+            `apply_spec_augment is True`.
+ mask_feature_length (`int`, *optional*, defaults to 10):
+ Length of vector span along the feature axis.
+        mask_feature_min_masks (`int`, *optional*, defaults to 0):
+            The minimum number of masks of length `mask_feature_length` generated along the feature axis, each time
+            step, irrespective of `mask_feature_prob`. Only relevant if
+            `mask_feature_prob*len(feature_axis)/mask_feature_length < mask_feature_min_masks`.
+ num_mel_bins (`int`, *optional*, defaults to 80):
+            Number of mel-filterbank features per input frame. Used by the speech decoder pre-net. Should correspond to
+ the value used in the [`SpeechT5Processor`] class.
+ speech_decoder_prenet_layers (`int`, *optional*, defaults to 2):
+ Number of layers in the speech decoder pre-net.
+ speech_decoder_prenet_units (`int`, *optional*, defaults to 256):
+ Dimensionality of the layers in the speech decoder pre-net.
+ speech_decoder_prenet_dropout (`float`, *optional*, defaults to 0.5):
+ The dropout probability for the speech decoder pre-net layers.
+ speaker_embedding_dim (`int`, *optional*, defaults to 512):
+ Dimensionality of the *XVector* embedding vectors.
+ speech_decoder_postnet_layers (`int`, *optional*, defaults to 5):
+ Number of layers in the speech decoder post-net.
+ speech_decoder_postnet_units (`int`, *optional*, defaults to 256):
+ Dimensionality of the layers in the speech decoder post-net.
+ speech_decoder_postnet_kernel (`int`, *optional*, defaults to 5):
+            Kernel size of the 1D convolutional filters in the speech decoder post-net.
+ speech_decoder_postnet_dropout (`float`, *optional*, defaults to 0.5):
+ The dropout probability for the speech decoder post-net layers.
+ reduction_factor (`int`, *optional*, defaults to 2):
+ Spectrogram length reduction factor for the speech decoder inputs.
+ max_speech_positions (`int`, *optional*, defaults to 4000):
+ The maximum sequence length of speech features that this model might ever be used with.
+ max_text_positions (`int`, *optional*, defaults to 450):
+ The maximum sequence length of text features that this model might ever be used with.
+ encoder_max_relative_position (`int`, *optional*, defaults to 160):
+ Maximum distance for relative position embedding in the encoder.
+ use_guided_attention_loss (`bool`, *optional*, defaults to `True`):
+ Whether to apply guided attention loss while training the TTS model.
+ guided_attention_loss_num_heads (`int`, *optional*, defaults to 2):
+ Number of attention heads the guided attention loss will be applied to. Use -1 to apply this loss to all
+ attention heads.
+ guided_attention_loss_sigma (`float`, *optional*, defaults to 0.4):
+ Standard deviation for guided attention loss.
+ guided_attention_loss_scale (`float`, *optional*, defaults to 10.0):
+ Scaling coefficient for guided attention loss (also known as lambda).
+ use_cache (`bool`, *optional*, defaults to `True`):
+ Whether or not the model should return the last key/values attentions (not used by all models).
+
+ Example:
+
+ ```python
+ >>> from transformers import SpeechT5Model, SpeechT5Config
+
+ >>> # Initializing a "microsoft/speecht5_asr" style configuration
+ >>> configuration = SpeechT5Config()
+
+ >>> # Initializing a model (with random weights) from the "microsoft/speecht5_asr" style configuration
+ >>> model = SpeechT5Model(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```"""
+
+ model_type = "speecht5"
+ attribute_map = {"num_attention_heads": "encoder_attention_heads", "num_hidden_layers": "encoder_layers"}
+
+ def __init__(
+ self,
+ vocab_size=81,
+ hidden_size=768,
+ encoder_layers=12,
+ encoder_attention_heads=12,
+ encoder_ffn_dim=3072,
+ encoder_layerdrop=0.1,
+ decoder_layers=6,
+ decoder_ffn_dim=3072,
+ decoder_attention_heads=12,
+ decoder_layerdrop=0.1,
+ hidden_act="gelu",
+ positional_dropout=0.1,
+ hidden_dropout=0.1,
+ attention_dropout=0.1,
+ activation_dropout=0.1,
+ initializer_range=0.02,
+ layer_norm_eps=1e-5,
+ scale_embedding=False,
+ feat_extract_norm="group",
+ feat_proj_dropout=0.0,
+ feat_extract_activation="gelu",
+ conv_dim=(512, 512, 512, 512, 512, 512, 512),
+ conv_stride=(5, 2, 2, 2, 2, 2, 2),
+ conv_kernel=(10, 3, 3, 3, 3, 2, 2),
+ conv_bias=False,
+ num_conv_pos_embeddings=128,
+ num_conv_pos_embedding_groups=16,
+ apply_spec_augment=True,
+ mask_time_prob=0.05,
+ mask_time_length=10,
+ mask_time_min_masks=2,
+ mask_feature_prob=0.0,
+ mask_feature_length=10,
+ mask_feature_min_masks=0,
+ pad_token_id=1,
+ bos_token_id=0,
+ eos_token_id=2,
+ decoder_start_token_id=2,
+ num_mel_bins=80,
+ speech_decoder_prenet_layers=2,
+ speech_decoder_prenet_units=256,
+ speech_decoder_prenet_dropout=0.5,
+ speaker_embedding_dim=512,
+ speech_decoder_postnet_layers=5,
+ speech_decoder_postnet_units=256,
+ speech_decoder_postnet_kernel=5,
+ speech_decoder_postnet_dropout=0.5,
+ reduction_factor=2,
+ max_speech_positions=4000,
+ max_text_positions=450,
+ encoder_max_relative_position=160,
+ use_guided_attention_loss=True,
+ guided_attention_loss_num_heads=2,
+ guided_attention_loss_sigma=0.4,
+ guided_attention_loss_scale=10.0,
+ use_cache=True,
+ is_encoder_decoder=True,
+ **kwargs,
+ ):
+ self.vocab_size = vocab_size
+ self.hidden_size = hidden_size
+ self.encoder_layers = encoder_layers
+ self.encoder_ffn_dim = encoder_ffn_dim
+ self.encoder_attention_heads = encoder_attention_heads
+ self.encoder_layerdrop = encoder_layerdrop
+ self.decoder_layers = decoder_layers
+ self.decoder_ffn_dim = decoder_ffn_dim
+ self.decoder_attention_heads = decoder_attention_heads
+ self.decoder_layerdrop = decoder_layerdrop
+ self.hidden_act = hidden_act
+ self.positional_dropout = positional_dropout
+ self.hidden_dropout = hidden_dropout
+ self.attention_dropout = attention_dropout
+ self.activation_dropout = activation_dropout
+ self.initializer_range = initializer_range
+ self.layer_norm_eps = layer_norm_eps
+ self.scale_embedding = scale_embedding
+
+ self.feat_extract_norm = feat_extract_norm
+ self.feat_proj_dropout = feat_proj_dropout
+ self.feat_extract_activation = feat_extract_activation
+ self.conv_dim = list(conv_dim)
+ self.conv_stride = list(conv_stride)
+ self.conv_kernel = list(conv_kernel)
+ self.conv_bias = conv_bias
+ self.num_conv_pos_embeddings = num_conv_pos_embeddings
+ self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
+ self.num_feat_extract_layers = len(self.conv_dim)
+
+ if (
+ (len(self.conv_stride) != self.num_feat_extract_layers)
+ or (len(self.conv_kernel) != self.num_feat_extract_layers)
+ or (len(self.conv_dim) != self.num_feat_extract_layers)
+ ):
+ raise ValueError(
+ "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
+ " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
+ f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
+ f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
+ )
+
+ # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
+ self.apply_spec_augment = apply_spec_augment
+ self.mask_time_prob = mask_time_prob
+ self.mask_time_length = mask_time_length
+ self.mask_time_min_masks = mask_time_min_masks
+ self.mask_feature_prob = mask_feature_prob
+ self.mask_feature_length = mask_feature_length
+ self.mask_feature_min_masks = mask_feature_min_masks
+
+ self.num_mel_bins = num_mel_bins
+ self.speech_decoder_prenet_layers = speech_decoder_prenet_layers
+ self.speech_decoder_prenet_units = speech_decoder_prenet_units
+ self.speech_decoder_prenet_dropout = speech_decoder_prenet_dropout
+ self.speaker_embedding_dim = speaker_embedding_dim
+
+ self.speech_decoder_postnet_layers = speech_decoder_postnet_layers
+ self.speech_decoder_postnet_units = speech_decoder_postnet_units
+ self.speech_decoder_postnet_kernel = speech_decoder_postnet_kernel
+ self.speech_decoder_postnet_dropout = speech_decoder_postnet_dropout
+ self.reduction_factor = reduction_factor
+
+ self.max_speech_positions = max_speech_positions
+ self.max_text_positions = max_text_positions
+ self.encoder_max_relative_position = encoder_max_relative_position
+
+ self.use_guided_attention_loss = use_guided_attention_loss
+ self.guided_attention_loss_num_heads = guided_attention_loss_num_heads
+ self.guided_attention_loss_sigma = guided_attention_loss_sigma
+ self.guided_attention_loss_scale = guided_attention_loss_scale
+
+ self.use_cache = use_cache
+ self.is_encoder_decoder = is_encoder_decoder
+
+ super().__init__(
+ pad_token_id=pad_token_id,
+ bos_token_id=bos_token_id,
+ eos_token_id=eos_token_id,
+ is_encoder_decoder=is_encoder_decoder,
+ decoder_start_token_id=decoder_start_token_id,
+ **kwargs,
+ )
+
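+    # Overall downsampling factor of the speech encoder pre-net: the product of the
+    # convolutional strides (320 with the default `conv_stride=(5, 2, 2, 2, 2, 2, 2)`),
+    # i.e. how many input waveform samples correspond to one encoder frame.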
+ def inputs_to_logits_ratio(self):
+ return functools.reduce(operator.mul, self.conv_stride, 1)
+
+
+class SpeechT5HifiGanConfig(PretrainedConfig):
+ r"""
+ This is the configuration class to store the configuration of a [`SpeechT5HifiGanModel`]. It is used to instantiate
+ a SpeechT5 HiFi-GAN vocoder model according to the specified arguments, defining the model architecture.
+ Instantiating a configuration with the defaults will yield a similar configuration to that of the SpeechT5
+ [microsoft/speecht5_hifigan](https://huggingface.co/microsoft/speecht5_hifigan) architecture.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+ Args:
+ model_in_dim (`int`, *optional*, defaults to 80):
+ The number of frequency bins in the input log-mel spectrogram.
+ sampling_rate (`int`, *optional*, defaults to 16000):
+ The sampling rate at which the output audio will be generated, expressed in hertz (Hz).
+ upsample_initial_channel (`int`, *optional*, defaults to 512):
+ The number of input channels into the upsampling network.
+ upsample_rates (`Tuple[int]` or `List[int]`, *optional*, defaults to `[4, 4, 4, 4]`):
+ A tuple of integers defining the stride of each 1D convolutional layer in the upsampling network. The
+ length of *upsample_rates* defines the number of convolutional layers and has to match the length of
+ *upsample_kernel_sizes*.
+ upsample_kernel_sizes (`Tuple[int]` or `List[int]`, *optional*, defaults to `[8, 8, 8, 8]`):
+ A tuple of integers defining the kernel size of each 1D convolutional layer in the upsampling network. The
+ length of *upsample_kernel_sizes* defines the number of convolutional layers and has to match the length of
+ *upsample_rates*.
+ resblock_kernel_sizes (`Tuple[int]` or `List[int]`, *optional*, defaults to `[3, 7, 11]`):
+ A tuple of integers defining the kernel sizes of the 1D convolutional layers in the multi-receptive field
+ fusion (MRF) module.
+ resblock_dilation_sizes (`Tuple[Tuple[int]]` or `List[List[int]]`, *optional*, defaults to `[[1, 3, 5], [1, 3, 5], [1, 3, 5]]`):
+ A nested tuple of integers defining the dilation rates of the dilated 1D convolutional layers in the
+ multi-receptive field fusion (MRF) module.
+ initializer_range (`float`, *optional*, defaults to 0.01):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+ leaky_relu_slope (`float`, *optional*, defaults to 0.1):
+ The angle of the negative slope used by the leaky ReLU activation.
+ normalize_before (`bool`, *optional*, defaults to `True`):
+ Whether or not to normalize the spectrogram before vocoding using the vocoder's learned mean and variance.
+
+ Example:
+
+ ```python
+ >>> from transformers import SpeechT5HifiGan, SpeechT5HifiGanConfig
+
+ >>> # Initializing a "microsoft/speecht5_hifigan" style configuration
+ >>> configuration = SpeechT5HifiGanConfig()
+
+ >>> # Initializing a model (with random weights) from the "microsoft/speecht5_hifigan" style configuration
+ >>> model = SpeechT5HifiGan(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```"""
+
+ model_type = "hifigan"
+
+ def __init__(
+ self,
+ model_in_dim=80,
+ sampling_rate=16000,
+ upsample_initial_channel=512,
+ upsample_rates=[4, 4, 4, 4],
+ upsample_kernel_sizes=[8, 8, 8, 8],
+ resblock_kernel_sizes=[3, 7, 11],
+ resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5], [1, 3, 5]],
+ initializer_range=0.01,
+ leaky_relu_slope=0.1,
+ normalize_before=True,
+ **kwargs,
+ ):
+ self.model_in_dim = model_in_dim
+ self.sampling_rate = sampling_rate
+ self.upsample_initial_channel = upsample_initial_channel
+ self.upsample_rates = upsample_rates
+ self.upsample_kernel_sizes = upsample_kernel_sizes
+ self.resblock_kernel_sizes = resblock_kernel_sizes
+ self.resblock_dilation_sizes = resblock_dilation_sizes
+ self.initializer_range = initializer_range
+ self.leaky_relu_slope = leaky_relu_slope
+ self.normalize_before = normalize_before
+ super().__init__(**kwargs)
diff --git a/venv/lib/python3.10/site-packages/transformers/models/speecht5/convert_hifigan.py b/venv/lib/python3.10/site-packages/transformers/models/speecht5/convert_hifigan.py
new file mode 100644
index 0000000000000000000000000000000000000000..4d78bb73af3022924a34b8fdeafc7bc18b9f163b
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/speecht5/convert_hifigan.py
@@ -0,0 +1,108 @@
+# coding=utf-8
+# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Convert SpeechT5 HiFi-GAN checkpoint."""
+
+import argparse
+
+import numpy as np
+import torch
+
+from transformers import SpeechT5HifiGan, SpeechT5HifiGanConfig, logging
+
+
+logging.set_verbosity_info()
+logger = logging.get_logger("transformers.models.speecht5")
+
+
+def load_weights(checkpoint, hf_model, config):
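+    # The original checkpoint stores weight-normalized convolutions as separate
+    # `weight_g` / `weight_v` tensors. Apply weight norm so those parameters exist on the
+    # HF model, copy them over, then remove weight norm to fuse them back into plain weights.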
+ hf_model.apply_weight_norm()
+
+ hf_model.conv_pre.weight_g.data = checkpoint["input_conv.weight_g"]
+ hf_model.conv_pre.weight_v.data = checkpoint["input_conv.weight_v"]
+ hf_model.conv_pre.bias.data = checkpoint["input_conv.bias"]
+
+ for i in range(len(config.upsample_rates)):
+ hf_model.upsampler[i].weight_g.data = checkpoint[f"upsamples.{i}.1.weight_g"]
+ hf_model.upsampler[i].weight_v.data = checkpoint[f"upsamples.{i}.1.weight_v"]
+ hf_model.upsampler[i].bias.data = checkpoint[f"upsamples.{i}.1.bias"]
+
+ for i in range(len(config.upsample_rates) * len(config.resblock_kernel_sizes)):
+ for j in range(len(config.resblock_dilation_sizes)):
+ hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_g"]
+ hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_v"]
+ hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f"blocks.{i}.convs1.{j}.1.bias"]
+
+ hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_g"]
+ hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_v"]
+ hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f"blocks.{i}.convs2.{j}.1.bias"]
+
+ hf_model.conv_post.weight_g.data = checkpoint["output_conv.1.weight_g"]
+ hf_model.conv_post.weight_v.data = checkpoint["output_conv.1.weight_v"]
+ hf_model.conv_post.bias.data = checkpoint["output_conv.1.bias"]
+
+ hf_model.remove_weight_norm()
+
+
+@torch.no_grad()
+def convert_hifigan_checkpoint(
+ checkpoint_path,
+ stats_path,
+ pytorch_dump_folder_path,
+ config_path=None,
+ repo_id=None,
+):
+ if config_path is not None:
+ config = SpeechT5HifiGanConfig.from_pretrained(config_path)
+ else:
+ config = SpeechT5HifiGanConfig()
+
+ model = SpeechT5HifiGan(config)
+
+ orig_checkpoint = torch.load(checkpoint_path)
+ load_weights(orig_checkpoint["model"]["generator"], model, config)
+
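+    # The stats file holds the mean and scale of the log-mel features the vocoder was
+    # trained on; they are attached to the model so it can normalize input spectrograms
+    # before vocoding (see `normalize_before` in the config).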
+ stats = np.load(stats_path)
+ mean = stats[0].reshape(-1)
+ scale = stats[1].reshape(-1)
+ model.mean = torch.from_numpy(mean).float()
+ model.scale = torch.from_numpy(scale).float()
+
+ model.save_pretrained(pytorch_dump_folder_path)
+
+ if repo_id:
+ print("Pushing to the hub...")
+ model.push_to_hub(repo_id)
+
+
+if __name__ == "__main__":
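+    # Example invocation (file names below are placeholders):
+    #   python convert_hifigan.py --checkpoint_path generator.ckpt --stats_path stats.npy \
+    #       --pytorch_dump_folder_path ./speecht5_hifigan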
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
+ parser.add_argument("--stats_path", required=True, default=None, type=str, help="Path to stats.npy file")
+ parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
+ parser.add_argument(
+ "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
+ )
+ parser.add_argument(
+ "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
+ )
+
+ args = parser.parse_args()
+ convert_hifigan_checkpoint(
+ args.checkpoint_path,
+ args.stats_path,
+ args.pytorch_dump_folder_path,
+ args.config_path,
+ args.push_to_hub,
+ )
diff --git a/venv/lib/python3.10/site-packages/transformers/models/speecht5/convert_speecht5_original_pytorch_checkpoint_to_pytorch.py b/venv/lib/python3.10/site-packages/transformers/models/speecht5/convert_speecht5_original_pytorch_checkpoint_to_pytorch.py
new file mode 100644
index 0000000000000000000000000000000000000000..20dea800d9d18fcb7687f0e5b8c5ebfa802fd3fd
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/speecht5/convert_speecht5_original_pytorch_checkpoint_to_pytorch.py
@@ -0,0 +1,401 @@
+# coding=utf-8
+# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Convert SpeechT5 checkpoint."""
+
+import argparse
+
+import torch
+
+from transformers import (
+ SpeechT5Config,
+ SpeechT5FeatureExtractor,
+ SpeechT5ForSpeechToSpeech,
+ SpeechT5ForSpeechToText,
+ SpeechT5ForTextToSpeech,
+ SpeechT5Processor,
+ SpeechT5Tokenizer,
+ logging,
+)
+from transformers.tokenization_utils import AddedToken
+
+
+logging.set_verbosity_info()
+logger = logging.get_logger("transformers.models.speecht5")
+
+MAPPING_SPEECH_ENCODER_PRENET = {
+ "speech_encoder_prenet.layer_norm": "speecht5.encoder.prenet.feature_projection.layer_norm",
+ "speech_encoder_prenet.post_extract_proj": "speecht5.encoder.prenet.feature_projection.projection",
+ "speech_encoder_prenet.pos_conv.0": "speecht5.encoder.prenet.pos_conv_embed.conv",
+ "speech_encoder_prenet.mask_emb": "speecht5.encoder.prenet.masked_spec_embed",
+}
+MAPPING_TEXT_ENCODER_PRENET = {
+ "text_encoder_prenet.encoder_prenet.0": "speecht5.encoder.prenet.embed_tokens",
+ "text_encoder_prenet.encoder_prenet.1.alpha": "speecht5.encoder.prenet.encode_positions.alpha",
+}
+MAPPING_SPEECH_DECODER_PRENET = {
+ "speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0": "speecht5.decoder.prenet.layers.0",
+ "speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0": "speecht5.decoder.prenet.layers.1",
+ "speech_decoder_prenet.decoder_prenet.0.1": "speecht5.decoder.prenet.final_layer",
+ "speech_decoder_prenet.decoder_prenet.1.alpha": "speecht5.decoder.prenet.encode_positions.alpha",
+ "speech_decoder_prenet.spkembs_layer.0": "speecht5.decoder.prenet.speaker_embeds_layer",
+}
+MAPPING_SPEECH_DECODER_POSTNET = {
+ "speech_decoder_postnet.feat_out": "speech_decoder_postnet.feat_out",
+ "speech_decoder_postnet.prob_out": "speech_decoder_postnet.prob_out",
+ "speech_decoder_postnet.postnet.postnet.0.0": "speech_decoder_postnet.layers.0.conv",
+ "speech_decoder_postnet.postnet.postnet.0.1": "speech_decoder_postnet.layers.0.batch_norm",
+ "speech_decoder_postnet.postnet.postnet.1.0": "speech_decoder_postnet.layers.1.conv",
+ "speech_decoder_postnet.postnet.postnet.1.1": "speech_decoder_postnet.layers.1.batch_norm",
+ "speech_decoder_postnet.postnet.postnet.2.0": "speech_decoder_postnet.layers.2.conv",
+ "speech_decoder_postnet.postnet.postnet.2.1": "speech_decoder_postnet.layers.2.batch_norm",
+ "speech_decoder_postnet.postnet.postnet.3.0": "speech_decoder_postnet.layers.3.conv",
+ "speech_decoder_postnet.postnet.postnet.3.1": "speech_decoder_postnet.layers.3.batch_norm",
+ "speech_decoder_postnet.postnet.postnet.4.0": "speech_decoder_postnet.layers.4.conv",
+ "speech_decoder_postnet.postnet.postnet.4.1": "speech_decoder_postnet.layers.4.batch_norm",
+}
+MAPPING_TEXT_DECODER_PRENET = {
+ "text_decoder_prenet.embed_tokens": "speecht5.decoder.prenet.embed_tokens",
+}
+MAPPING_TEXT_DECODER_POSTNET = {
+ "text_decoder_postnet.output_projection": "text_decoder_postnet.lm_head",
+}
+MAPPING_ENCODER = {
+ "encoder.layers.*.self_attn.k_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj",
+ "encoder.layers.*.self_attn.v_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj",
+ "encoder.layers.*.self_attn.q_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj",
+ "encoder.layers.*.self_attn.out_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj",
+ "encoder.layers.*.self_attn_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.layer_norm",
+ "encoder.layers.*.fc1": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense",
+ "encoder.layers.*.fc2": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense",
+ "encoder.layers.*.final_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm",
+ "encoder.layer_norm": "speecht5.encoder.wrapped_encoder.layer_norm",
+ "encoder.pos_emb.pe_k": "speecht5.encoder.wrapped_encoder.embed_positions.pe_k",
+}
+MAPPING_DECODER = {
+ "decoder.layers.*.self_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj",
+ "decoder.layers.*.self_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj",
+ "decoder.layers.*.self_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj",
+ "decoder.layers.*.self_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj",
+ "decoder.layers.*.self_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm",
+ "decoder.layers.*.encoder_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj",
+ "decoder.layers.*.encoder_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj",
+ "decoder.layers.*.encoder_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj",
+ "decoder.layers.*.encoder_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj",
+ "decoder.layers.*.encoder_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm",
+ "decoder.layers.*.fc1": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense",
+ "decoder.layers.*.fc2": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense",
+ "decoder.layers.*.final_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm",
+}
+MAPPING_S2T = {
+ **MAPPING_SPEECH_ENCODER_PRENET,
+ **MAPPING_ENCODER,
+ **MAPPING_DECODER,
+ **MAPPING_TEXT_DECODER_PRENET,
+ **MAPPING_TEXT_DECODER_POSTNET,
+}
+MAPPING_T2S = {
+ **MAPPING_TEXT_ENCODER_PRENET,
+ **MAPPING_ENCODER,
+ **MAPPING_DECODER,
+ **MAPPING_SPEECH_DECODER_PRENET,
+ **MAPPING_SPEECH_DECODER_POSTNET,
+}
+MAPPING_S2S = {
+ **MAPPING_SPEECH_ENCODER_PRENET,
+ **MAPPING_ENCODER,
+ **MAPPING_DECODER,
+ **MAPPING_SPEECH_DECODER_PRENET,
+ **MAPPING_SPEECH_DECODER_POSTNET,
+}
+TOP_LEVEL_KEYS = []
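+# In the key lists below, a trailing ".*" or an embedded ".*." acts as a wildcard; the
+# matching logic lives in `should_ignore` and `recursively_load_weights`.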
+IGNORE_KEYS = [
+ "encoder.version",
+ "encoder.layers.*.norm_k.weight",
+ "encoder.layers.*.norm_k.bias",
+ "decoder.version",
+ "decoder.layers.*.norm_k.weight",
+ "decoder.layers.*.norm_k.bias",
+ "decoder.pos_emb.pe_k",
+ "speech_encoder_prenet.embed_positions._float_tensor",
+ "text_decoder_prenet.embed_positions._float_tensor",
+]
+IGNORE_KEYS_S2T = IGNORE_KEYS + [
+ "encoder.proj",
+ "text_encoder_prenet.*",
+ "speech_decoder_prenet.*",
+ "speech_decoder_postnet.*",
+]
+IGNORE_KEYS_T2S = IGNORE_KEYS + [
+ "encoder.proj",
+ "speech_encoder_prenet.*",
+ "text_decoder_prenet.*",
+ "text_decoder_postnet.*",
+]
+IGNORE_KEYS_S2S = IGNORE_KEYS + [
+ "encoder.proj",
+ "text_encoder_prenet.*",
+ "text_decoder_prenet.*",
+ "text_decoder_postnet.*",
+]
+
+
+def set_recursively(hf_pointer, key, value, full_name, weight_type):
+ for attribute in key.split("."):
+ hf_pointer = getattr(hf_pointer, attribute)
+
+ if weight_type is not None:
+ hf_shape = getattr(hf_pointer, weight_type).shape
+ else:
+ hf_shape = hf_pointer.shape
+
+ if hf_shape != value.shape:
+ raise ValueError(
+ f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
+ f" {value.shape} for {full_name}"
+ )
+
+ if weight_type == "weight":
+ hf_pointer.weight.data = value
+ elif weight_type == "weight_g":
+ hf_pointer.weight_g.data = value
+ elif weight_type == "weight_v":
+ hf_pointer.weight_v.data = value
+ elif weight_type == "bias":
+ hf_pointer.bias.data = value
+ elif weight_type == "running_mean":
+ hf_pointer.running_mean.data = value
+ elif weight_type == "running_var":
+ hf_pointer.running_var.data = value
+ elif weight_type == "num_batches_tracked":
+ hf_pointer.num_batches_tracked.data = value
+ else:
+ hf_pointer.data = value
+
+ logger.info(f"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.")
+
+
+def should_ignore(name, ignore_keys):
+ for key in ignore_keys:
+ if key.endswith(".*"):
+ if name.startswith(key[:-1]):
+ return True
+ elif ".*." in key:
+ prefix, suffix = key.split(".*.")
+ if prefix in name and suffix in name:
+ return True
+ elif key in name:
+ return True
+ return False
+
+
+def recursively_load_weights(fairseq_dict, hf_model, task):
+ unused_weights = []
+
+ if task == "s2t":
+ feature_encoder = hf_model.speecht5.encoder.prenet.feature_encoder
+ MAPPING = MAPPING_S2T
+ IGNORE_KEYS = IGNORE_KEYS_S2T
+ elif task == "t2s":
+ feature_encoder = None
+ MAPPING = MAPPING_T2S
+ IGNORE_KEYS = IGNORE_KEYS_T2S
+ elif task == "s2s":
+ feature_encoder = hf_model.speecht5.encoder.prenet.feature_encoder
+ MAPPING = MAPPING_S2S
+ IGNORE_KEYS = IGNORE_KEYS_S2S
+ else:
+ raise ValueError(f"Unsupported task: {task}")
+
+ for name, value in fairseq_dict.items():
+ if should_ignore(name, IGNORE_KEYS):
+ logger.info(f"{name} was ignored")
+ continue
+
+ is_used = False
+ if "conv_layers" in name:
+ load_conv_layer(
+ name,
+ value,
+ feature_encoder,
+ unused_weights,
+ hf_model.config.feat_extract_norm == "group",
+ )
+ is_used = True
+ else:
+ for key, mapped_key in MAPPING.items():
+ # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
+
+ if "*" in key:
+ prefix, suffix = key.split(".*.")
+ if prefix in name and suffix in name:
+ key = suffix
+
+ # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
+ if key in name:
+ is_used = True
+ if "*" in mapped_key:
+ layer_index = name.split(key)[0].split(".")[-2]
+ mapped_key = mapped_key.replace("*", layer_index)
+ if "weight_g" in name:
+ weight_type = "weight_g"
+ elif "weight_v" in name:
+ weight_type = "weight_v"
+ elif "bias" in name:
+ weight_type = "bias"
+ elif "weight" in name:
+ weight_type = "weight"
+ elif "running_mean" in name:
+ weight_type = "running_mean"
+ elif "running_var" in name:
+ weight_type = "running_var"
+ elif "num_batches_tracked" in name:
+ weight_type = "num_batches_tracked"
+ else:
+ weight_type = None
+ set_recursively(hf_model, mapped_key, value, name, weight_type)
+ continue
+ if not is_used:
+ unused_weights.append(name)
+
+ logger.warning(f"Unused weights: {unused_weights}")
+
+
+def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
+ name = full_name.split("conv_layers.")[-1]
+ items = name.split(".")
+ layer_id = int(items[0])
+ type_id = int(items[1])
+
+ if type_id == 0:
+ if "bias" in name:
+ if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
+ raise ValueError(
+ f"{full_name} has size {value.shape}, but"
+ f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
+ )
+ feature_extractor.conv_layers[layer_id].conv.bias.data = value
+ logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
+ elif "weight" in name:
+ if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
+ raise ValueError(
+ f"{full_name} has size {value.shape}, but"
+ f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
+ )
+ feature_extractor.conv_layers[layer_id].conv.weight.data = value
+ logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
+ elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
+ if "bias" in name:
+ if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
+ raise ValueError(
+ f"{full_name} has size {value.shape}, but"
+ f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
+ )
+ feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
+ logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
+ elif "weight" in name:
+ if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
+ raise ValueError(
+ f"{full_name} has size {value.shape}, but"
+ f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
+ )
+ feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
+ logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
+ else:
+ unused_weights.append(full_name)
+
+
+@torch.no_grad()
+def convert_speecht5_checkpoint(
+ task,
+ checkpoint_path,
+ pytorch_dump_folder_path,
+ config_path=None,
+ vocab_path=None,
+ repo_id=None,
+):
+ """
+ Copy/paste/tweak model's weights to transformers design.
+ """
+ if config_path is not None:
+ config = SpeechT5Config.from_pretrained(config_path)
+ else:
+ config = SpeechT5Config()
+
+ if task == "s2t":
+ config.max_length = config.max_text_positions
+ model = SpeechT5ForSpeechToText(config)
+ elif task == "t2s":
+ config.max_speech_positions = 1876
+ config.max_text_positions = 600
+ config.max_length = config.max_speech_positions
+ model = SpeechT5ForTextToSpeech(config)
+ elif task == "s2s":
+ config.max_speech_positions = 1876
+ config.max_length = config.max_speech_positions
+ model = SpeechT5ForSpeechToSpeech(config)
+ else:
+ raise ValueError(f"Unknown task name: {task}")
+
+ if vocab_path:
+ tokenizer = SpeechT5Tokenizer(vocab_path, model_max_length=config.max_text_positions)
+
+ # Mask token behaves like a normal word, i.e. include the space before it
+ mask_token = AddedToken("", lstrip=True, rstrip=False)
+ tokenizer.mask_token = mask_token
+ tokenizer.add_special_tokens({"mask_token": mask_token})
+        tokenizer.add_tokens(["<ctc_blank>"])
+
+ feature_extractor = SpeechT5FeatureExtractor()
+ processor = SpeechT5Processor(tokenizer=tokenizer, feature_extractor=feature_extractor)
+ processor.save_pretrained(pytorch_dump_folder_path)
+
+ fairseq_checkpoint = torch.load(checkpoint_path)
+ recursively_load_weights(fairseq_checkpoint["model"], model, task)
+
+ model.save_pretrained(pytorch_dump_folder_path)
+
+ if repo_id:
+ print("Pushing to the hub...")
+ processor.push_to_hub(repo_id)
+ model.push_to_hub(repo_id)
+
+
+if __name__ == "__main__":
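+    # Example invocation for a speech-to-text checkpoint (file names are placeholders):
+    #   python convert_speecht5_original_pytorch_checkpoint_to_pytorch.py --task s2t \
+    #       --checkpoint_path speecht5_asr.pt --vocab_path spm_char.model \
+    #       --pytorch_dump_folder_path ./speecht5_asr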
+ parser = argparse.ArgumentParser()
+ parser.add_argument(
+ "--task",
+ default="s2t",
+ type=str,
+ help="Type of the SpeechT5 model you'd like to convert. Should be one of 's2t', 't2s', 's2s'.",
+ )
+ parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to fairseq checkpoint")
+ parser.add_argument("--vocab_path", default=None, type=str, help="Path to SentencePiece model")
+ parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
+ parser.add_argument(
+ "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
+ )
+ parser.add_argument(
+ "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
+ )
+
+ args = parser.parse_args()
+ convert_speecht5_checkpoint(
+ args.task,
+ args.checkpoint_path,
+ args.pytorch_dump_folder_path,
+ args.config_path,
+ args.vocab_path,
+ args.push_to_hub,
+ )
diff --git a/venv/lib/python3.10/site-packages/transformers/models/speecht5/feature_extraction_speecht5.py b/venv/lib/python3.10/site-packages/transformers/models/speecht5/feature_extraction_speecht5.py
new file mode 100644
index 0000000000000000000000000000000000000000..84d51e97df95e044886a7bb5605ed4b4989c9983
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/speecht5/feature_extraction_speecht5.py
@@ -0,0 +1,393 @@
+# coding=utf-8
+# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Feature extractor class for SpeechT5."""
+
+import warnings
+from typing import Any, Dict, List, Optional, Union
+
+import numpy as np
+
+from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
+from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
+from ...feature_extraction_utils import BatchFeature
+from ...utils import PaddingStrategy, TensorType, logging
+
+
+logger = logging.get_logger(__name__)
+
+
+class SpeechT5FeatureExtractor(SequenceFeatureExtractor):
+ r"""
+ Constructs a SpeechT5 feature extractor.
+
+ This class can pre-process a raw speech signal by (optionally) normalizing to zero-mean unit-variance, for use by
+ the SpeechT5 speech encoder prenet.
+
+ This class can also extract log-mel filter bank features from raw speech, for use by the SpeechT5 speech decoder
+ prenet.
+
+ This feature extractor inherits from [`~feature_extraction_sequence_utils.SequenceFeatureExtractor`] which contains
+ most of the main methods. Users should refer to this superclass for more information regarding those methods.
+
+ Args:
+ feature_size (`int`, *optional*, defaults to 1):
+ The feature dimension of the extracted features.
+ sampling_rate (`int`, *optional*, defaults to 16000):
+ The sampling rate at which the audio files should be digitalized expressed in hertz (Hz).
+ padding_value (`float`, *optional*, defaults to 0.0):
+ The value that is used to fill the padding values.
+ do_normalize (`bool`, *optional*, defaults to `False`):
+ Whether or not to zero-mean unit-variance normalize the input. Normalizing can help to significantly
+ improve the performance for some models.
+ num_mel_bins (`int`, *optional*, defaults to 80):
+ The number of mel-frequency bins in the extracted spectrogram features.
+ hop_length (`int`, *optional*, defaults to 16):
+ Number of ms between windows. Otherwise referred to as "shift" in many papers.
+ win_length (`int`, *optional*, defaults to 64):
+ Number of ms per window.
+ win_function (`str`, *optional*, defaults to `"hann_window"`):
+ Name for the window function used for windowing, must be accessible via `torch.{win_function}`
+ frame_signal_scale (`float`, *optional*, defaults to 1.0):
+ Constant multiplied in creating the frames before applying DFT. This argument is deprecated.
+ fmin (`float`, *optional*, defaults to 80):
+ Minimum mel frequency in Hz.
+ fmax (`float`, *optional*, defaults to 7600):
+ Maximum mel frequency in Hz.
+ mel_floor (`float`, *optional*, defaults to 1e-10):
+ Minimum value of mel frequency banks.
+ reduction_factor (`int`, *optional*, defaults to 2):
+ Spectrogram length reduction factor. This argument is deprecated.
+ return_attention_mask (`bool`, *optional*, defaults to `True`):
+ Whether or not [`~SpeechT5FeatureExtractor.__call__`] should return `attention_mask`.
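+
+    Example (a minimal sketch using a dummy 16 kHz waveform):
+
+    ```python
+    >>> import numpy as np
+    >>> from transformers import SpeechT5FeatureExtractor
+
+    >>> feature_extractor = SpeechT5FeatureExtractor()
+    >>> waveform = np.zeros(16000, dtype=np.float32)  # one second of silence, for illustration only
+
+    >>> # waveform features for the speech encoder pre-net
+    >>> inputs = feature_extractor(audio=waveform, sampling_rate=16000, return_tensors="pt")
+
+    >>> # log-mel spectrogram targets for the speech decoder pre-net
+    >>> targets = feature_extractor(audio_target=waveform, sampling_rate=16000, return_tensors="pt")
+    ```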
+ """
+
+ model_input_names = ["input_values", "attention_mask"]
+
+ def __init__(
+ self,
+ feature_size: int = 1,
+ sampling_rate: int = 16000,
+ padding_value: float = 0.0,
+ do_normalize: bool = False,
+ num_mel_bins: int = 80,
+ hop_length: int = 16,
+ win_length: int = 64,
+ win_function: str = "hann_window",
+ frame_signal_scale: float = 1.0,
+ fmin: float = 80,
+ fmax: float = 7600,
+ mel_floor: float = 1e-10,
+ reduction_factor: int = 2,
+ return_attention_mask: bool = True,
+ **kwargs,
+ ):
+ super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
+ self.do_normalize = do_normalize
+ self.return_attention_mask = return_attention_mask
+
+ self.num_mel_bins = num_mel_bins
+ self.hop_length = hop_length
+ self.win_length = win_length
+ self.win_function = win_function
+ self.frame_signal_scale = frame_signal_scale
+ self.fmin = fmin
+ self.fmax = fmax
+ self.mel_floor = mel_floor
+ self.reduction_factor = reduction_factor
+
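+        # `win_length` and `hop_length` are given in milliseconds; convert them to sample
+        # counts at the configured sampling rate (e.g. 64 ms -> 1024 samples at 16 kHz).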
+ self.sample_size = win_length * sampling_rate // 1000
+ self.sample_stride = hop_length * sampling_rate // 1000
+ self.n_fft = optimal_fft_length(self.sample_size)
+ self.n_freqs = (self.n_fft // 2) + 1
+
+ self.window = window_function(window_length=self.sample_size, name=self.win_function, periodic=True)
+
+ self.mel_filters = mel_filter_bank(
+ num_frequency_bins=self.n_freqs,
+ num_mel_filters=self.num_mel_bins,
+ min_frequency=self.fmin,
+ max_frequency=self.fmax,
+ sampling_rate=self.sampling_rate,
+ norm="slaney",
+ mel_scale="slaney",
+ )
+
+ if frame_signal_scale != 1.0:
+ warnings.warn(
+ "The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers",
+ FutureWarning,
+ )
+ if reduction_factor != 2.0:
+ warnings.warn(
+ "The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers",
+ FutureWarning,
+ )
+
+ @staticmethod
+ # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
+ def zero_mean_unit_var_norm(
+ input_values: List[np.ndarray], attention_mask: List[np.ndarray], padding_value: float = 0.0
+ ) -> List[np.ndarray]:
+ """
+ Every array in the list is normalized to have zero mean and unit variance
+ """
+ if attention_mask is not None:
+ attention_mask = np.array(attention_mask, np.int32)
+ normed_input_values = []
+
+ for vector, length in zip(input_values, attention_mask.sum(-1)):
+ normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
+ if length < normed_slice.shape[0]:
+ normed_slice[length:] = padding_value
+
+ normed_input_values.append(normed_slice)
+ else:
+ normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]
+
+ return normed_input_values
+
+ def _extract_mel_features(
+ self,
+ one_waveform: np.ndarray,
+ ) -> np.ndarray:
+ """
+ Extracts log-mel filterbank features for one waveform array (unbatched).
+ """
+ log_mel_spec = spectrogram(
+ one_waveform,
+ window=self.window,
+ frame_length=self.sample_size,
+ hop_length=self.sample_stride,
+ fft_length=self.n_fft,
+ mel_filters=self.mel_filters,
+ mel_floor=self.mel_floor,
+ log_mel="log10",
+ )
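+        # `spectrogram` returns (num_mel_bins, num_frames); transpose so that frames come
+        # first, as expected downstream.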
+ return log_mel_spec.T
+
+ def __call__(
+ self,
+ audio: Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None,
+ audio_target: Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None,
+ padding: Union[bool, str, PaddingStrategy] = False,
+ max_length: Optional[int] = None,
+ truncation: bool = False,
+ pad_to_multiple_of: Optional[int] = None,
+ return_attention_mask: Optional[bool] = None,
+ return_tensors: Optional[Union[str, TensorType]] = None,
+ sampling_rate: Optional[int] = None,
+ **kwargs,
+ ) -> BatchFeature:
+ """
+ Main method to featurize and prepare for the model one or several sequence(s).
+
+ Pass in a value for `audio` to extract waveform features. Pass in a value for `audio_target` to extract log-mel
+ spectrogram features.
+
+ Args:
+ audio (`np.ndarray`, `List[float]`, `List[np.ndarray]`, `List[List[float]]`, *optional*):
+ The sequence or batch of sequences to be processed. Each sequence can be a numpy array, a list of float
+ values, a list of numpy arrays or a list of list of float values. This outputs waveform features. Must
+ be mono channel audio, not stereo, i.e. single float per timestep.
+ audio_target (`np.ndarray`, `List[float]`, `List[np.ndarray]`, `List[List[float]]`, *optional*):
+ The sequence or batch of sequences to be processed as targets. Each sequence can be a numpy array, a
+ list of float values, a list of numpy arrays or a list of list of float values. This outputs log-mel
+ spectrogram features.
+ padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
+ Select a strategy to pad the returned sequences (according to the model's padding side and padding
+ index) among:
+
+ - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
+              sequence is provided).
+ - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
+ acceptable input length for the model if that argument is not provided.
+ - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
+ lengths).
+ max_length (`int`, *optional*):
+ Maximum length of the returned list and optionally padding length (see above).
+ truncation (`bool`):
+ Activates truncation to cut input sequences longer than *max_length* to *max_length*.
+ pad_to_multiple_of (`int`, *optional*):
+ If set will pad the sequence to a multiple of the provided value.
+
+ This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability
+ `>= 7.5` (Volta), or on TPUs which benefit from having sequence lengths be a multiple of 128.
+ return_attention_mask (`bool`, *optional*):
+ Whether to return the attention mask. If left to the default, will return the attention mask according
+ to the specific feature_extractor's default.
+
+ [What are attention masks?](../glossary#attention-mask)
+
+ return_tensors (`str` or [`~utils.TensorType`], *optional*):
+ If set, will return tensors instead of list of python integers. Acceptable values are:
+
+ - `'tf'`: Return TensorFlow `tf.constant` objects.
+ - `'pt'`: Return PyTorch `torch.Tensor` objects.
+ - `'np'`: Return Numpy `np.ndarray` objects.
+ sampling_rate (`int`, *optional*):
+ The sampling rate at which the `audio` or `audio_target` input was sampled. It is strongly recommended
+ to pass `sampling_rate` at the forward call to prevent silent errors.
+ """
+ if audio is None and audio_target is None:
+ raise ValueError("You must provide either `audio` or `audio_target` values.")
+
+ if sampling_rate is not None:
+ if sampling_rate != self.sampling_rate:
+ raise ValueError(
+ f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
+ f" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"
+ f" {self.sampling_rate} and not {sampling_rate}."
+ )
+ else:
+ logger.warning(
+ "It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
+ "Failing to do so can result in silent errors that might be hard to debug."
+ )
+
+ if audio is not None:
+ inputs = self._process_audio(
+ audio,
+ False,
+ padding,
+ max_length,
+ truncation,
+ pad_to_multiple_of,
+ return_attention_mask,
+ return_tensors,
+ **kwargs,
+ )
+ else:
+ inputs = None
+
+ if audio_target is not None:
+ inputs_target = self._process_audio(
+ audio_target,
+ True,
+ padding,
+ max_length,
+ truncation,
+ pad_to_multiple_of,
+ return_attention_mask,
+ return_tensors,
+ **kwargs,
+ )
+
+ if inputs is None:
+ return inputs_target
+ else:
+ inputs["labels"] = inputs_target["input_values"]
+ decoder_attention_mask = inputs_target.get("attention_mask")
+ if decoder_attention_mask is not None:
+ inputs["decoder_attention_mask"] = decoder_attention_mask
+
+ return inputs
+
+ def _process_audio(
+ self,
+ speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
+ is_target: bool = False,
+ padding: Union[bool, str, PaddingStrategy] = False,
+ max_length: Optional[int] = None,
+ truncation: bool = False,
+ pad_to_multiple_of: Optional[int] = None,
+ return_attention_mask: Optional[bool] = None,
+ return_tensors: Optional[Union[str, TensorType]] = None,
+ **kwargs,
+ ) -> BatchFeature:
+ is_batched_numpy = isinstance(speech, np.ndarray) and len(speech.shape) > 1
+ if is_batched_numpy and len(speech.shape) > 2:
+ raise ValueError(f"Only mono-channel audio is supported for input to {self}")
+ is_batched = is_batched_numpy or (
+ isinstance(speech, (list, tuple)) and (isinstance(speech[0], (np.ndarray, tuple, list)))
+ )
+
+ if is_batched:
+ speech = [np.asarray(speech, dtype=np.float32) for speech in speech]
+ elif not is_batched and not isinstance(speech, np.ndarray):
+ speech = np.asarray(speech, dtype=np.float32)
+ elif isinstance(speech, np.ndarray) and speech.dtype is np.dtype(np.float64):
+ speech = speech.astype(np.float32)
+
+ # always return batch
+ if not is_batched:
+ speech = [speech]
+
+ # needed to make pad() work on spectrogram inputs
+ feature_size_hack = self.feature_size
+
+ # convert into correct format for padding
+ if is_target:
+ features = [self._extract_mel_features(waveform) for waveform in speech]
+ encoded_inputs = BatchFeature({"input_values": features})
+ self.feature_size = self.num_mel_bins
+ else:
+ encoded_inputs = BatchFeature({"input_values": speech})
+
+ padded_inputs = self.pad(
+ encoded_inputs,
+ padding=padding,
+ max_length=max_length,
+ truncation=truncation,
+ pad_to_multiple_of=pad_to_multiple_of,
+ return_attention_mask=return_attention_mask,
+ **kwargs,
+ )
+
+ self.feature_size = feature_size_hack
+
+ # convert input values to correct format
+ input_values = padded_inputs["input_values"]
+ if not isinstance(input_values[0], np.ndarray):
+ padded_inputs["input_values"] = [np.asarray(array, dtype=np.float32) for array in input_values]
+ elif (
+ not isinstance(input_values, np.ndarray)
+ and isinstance(input_values[0], np.ndarray)
+ and input_values[0].dtype is np.dtype(np.float64)
+ ):
+ padded_inputs["input_values"] = [array.astype(np.float32) for array in input_values]
+ elif isinstance(input_values, np.ndarray) and input_values.dtype is np.dtype(np.float64):
+ padded_inputs["input_values"] = input_values.astype(np.float32)
+
+ # convert attention_mask to correct format
+ attention_mask = padded_inputs.get("attention_mask")
+ if attention_mask is not None:
+ padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]
+
+ # zero-mean and unit-variance normalization
+ if not is_target and self.do_normalize:
+ attention_mask = (
+ attention_mask
+ if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
+ else None
+ )
+ padded_inputs["input_values"] = self.zero_mean_unit_var_norm(
+ padded_inputs["input_values"], attention_mask=attention_mask, padding_value=self.padding_value
+ )
+
+ if return_tensors is not None:
+ padded_inputs = padded_inputs.convert_to_tensors(return_tensors)
+
+ return padded_inputs
+
+ def to_dict(self) -> Dict[str, Any]:
+ output = super().to_dict()
+
+ # Don't serialize these as they are derived from the other properties.
+ names = ["window", "mel_filters", "sample_size", "sample_stride", "n_fft", "n_freqs"]
+ for name in names:
+ if name in output:
+ del output[name]
+
+ return output
diff --git a/venv/lib/python3.10/site-packages/transformers/models/speecht5/modeling_speecht5.py b/venv/lib/python3.10/site-packages/transformers/models/speecht5/modeling_speecht5.py
new file mode 100644
index 0000000000000000000000000000000000000000..071b987dbb5a478e7eec59e6582678c4ec75c33c
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/speecht5/modeling_speecht5.py
@@ -0,0 +1,3362 @@
+# coding=utf-8
+# Copyright 2023 The Fairseq Authors, Microsoft Research, and the HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" PyTorch SpeechT5 model."""
+
+import math
+from typing import List, Optional, Tuple, Union
+
+import numpy as np
+import torch
+import torch.utils.checkpoint
+from torch import nn
+from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, L1Loss
+
+from ...activations import ACT2FN
+from ...integrations.deepspeed import is_deepspeed_zero3_enabled
+from ...modeling_attn_mask_utils import _prepare_4d_attention_mask, _prepare_4d_causal_attention_mask
+from ...modeling_outputs import (
+ BaseModelOutput,
+ BaseModelOutputWithPastAndCrossAttentions,
+ Seq2SeqLMOutput,
+ Seq2SeqModelOutput,
+ Seq2SeqSpectrogramOutput,
+)
+from ...modeling_utils import PreTrainedModel
+from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
+from .configuration_speecht5 import SpeechT5Config, SpeechT5HifiGanConfig
+
+
+logger = logging.get_logger(__name__)
+
+
+_HIDDEN_STATES_START_POSITION = 1
+
+# General docstring
+_CONFIG_FOR_DOC = "SpeechT5Config"
+
+
+from ..deprecated._archive_maps import SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
+
+
+# Copied from transformers.models.bart.modeling_bart.shift_tokens_right
+def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int):
+ """
+ Shift input ids one token to the right.
+ """
+ shifted_input_ids = input_ids.new_zeros(input_ids.shape)
+ shifted_input_ids[:, 1:] = input_ids[:, :-1].clone()
+ shifted_input_ids[:, 0] = decoder_start_token_id
+
+ if pad_token_id is None:
+ raise ValueError("self.model.config.pad_token_id has to be defined.")
+ # replace possible -100 values in labels by `pad_token_id`
+ shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)
+
+ return shifted_input_ids
+
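+
+# Illustrative sketch, not part of the upstream SpeechT5 implementation: shows how
+# `shift_tokens_right` above turns labels (with -100 used as padding) into decoder
+# inputs. The pad/start token ids below are hypothetical example values.
+def _example_shift_tokens_right():
+    labels = torch.tensor([[10, 11, 12, -100], [20, 21, -100, -100]])
+    decoder_input_ids = shift_tokens_right(labels, pad_token_id=1, decoder_start_token_id=2)
+    # Each row now starts with the decoder start token and any remaining -100 is
+    # replaced by pad_token_id: [[2, 10, 11, 12], [2, 20, 21, 1]]
+    return decoder_input_ids
+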
+
+def shift_spectrograms_right(input_values: torch.Tensor, reduction_factor: int = 1):
+ """
+ Shift input spectrograms one timestep to the right. Also applies the reduction factor to the sequence length.
+ """
+ # thin out frames for reduction factor
+ if reduction_factor > 1:
+ input_values = input_values[:, reduction_factor - 1 :: reduction_factor]
+
+ shifted_input_values = input_values.new_zeros(input_values.shape)
+ shifted_input_values[:, 1:] = input_values[:, :-1].clone()
+
+ # replace possible -100 values in labels by zeros
+ shifted_input_values.masked_fill_(shifted_input_values == -100.0, 0.0)
+
+ return shifted_input_values
+
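+
+# Illustrative sketch (hypothetical shapes, not library code): with reduction_factor=2
+# only every second frame is kept before the one-step right shift performed by
+# `shift_spectrograms_right` above.
+def _example_shift_spectrograms_right():
+    spectrogram = torch.arange(8, dtype=torch.float32).view(1, 4, 2)  # (batch, frames, mel bins)
+    shifted = shift_spectrograms_right(spectrogram, reduction_factor=2)
+    # kept frames are [[2., 3.], [6., 7.]]; after the shift the decoder prenet sees
+    # [[0., 0.], [2., 3.]]
+    return shifted
+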
+
+# Copied from transformers.models.wav2vec2.modeling_wav2vec2._compute_mask_indices
+def _compute_mask_indices(
+ shape: Tuple[int, int],
+ mask_prob: float,
+ mask_length: int,
+ attention_mask: Optional[torch.LongTensor] = None,
+ min_masks: int = 0,
+) -> np.ndarray:
+ """
+ Computes random mask spans for a given shape. Used to implement [SpecAugment: A Simple Data Augmentation Method for
+ ASR](https://arxiv.org/abs/1904.08779). Note that this method is not optimized to run on TPU and should be run on
+ CPU as part of the preprocessing during training.
+
+ Args:
+        shape: The shape for which to compute masks. This should be a tuple of size 2 where
+ the first element is the batch size and the second element is the length of the axis to span.
+ mask_prob: The percentage of the whole axis (between 0 and 1) which will be masked. The number of
+ independently generated mask spans of length `mask_length` is computed by
+ `mask_prob*shape[1]/mask_length`. Note that due to overlaps, `mask_prob` is an upper bound and the
+ actual percentage will be smaller.
+ mask_length: size of the mask
+ min_masks: minimum number of masked spans
+ attention_mask: A (right-padded) attention mask which independently shortens the feature axis of
+ each batch dimension.
+ """
+ batch_size, sequence_length = shape
+
+ if mask_length < 1:
+ raise ValueError("`mask_length` has to be bigger than 0.")
+
+ if mask_length > sequence_length:
+ raise ValueError(
+ f"`mask_length` has to be smaller than `sequence_length`, but got `mask_length`: {mask_length}"
+ f" and `sequence_length`: {sequence_length}`"
+ )
+
+ # epsilon is used for probabilistic rounding
+ epsilon = np.random.rand(1).item()
+
+ def compute_num_masked_span(input_length):
+ """Given input length, compute how many spans should be masked"""
+ num_masked_span = int(mask_prob * input_length / mask_length + epsilon)
+ num_masked_span = max(num_masked_span, min_masks)
+
+ # make sure num masked span <= sequence_length
+ if num_masked_span * mask_length > sequence_length:
+ num_masked_span = sequence_length // mask_length
+
+ # make sure num_masked span is also <= input_length - (mask_length - 1)
+ if input_length - (mask_length - 1) < num_masked_span:
+ num_masked_span = max(input_length - (mask_length - 1), 0)
+
+ return num_masked_span
+
+ # compute number of masked spans in batch
+ input_lengths = (
+ attention_mask.sum(-1).detach().tolist()
+ if attention_mask is not None
+ else [sequence_length for _ in range(batch_size)]
+ )
+
+ # SpecAugment mask to fill
+ spec_aug_mask = np.zeros((batch_size, sequence_length), dtype=bool)
+ spec_aug_mask_idxs = []
+
+ max_num_masked_span = compute_num_masked_span(sequence_length)
+
+ if max_num_masked_span == 0:
+ return spec_aug_mask
+
+ for input_length in input_lengths:
+ # compute num of masked spans for this input
+ num_masked_span = compute_num_masked_span(input_length)
+
+ # get random indices to mask
+ spec_aug_mask_idx = np.random.choice(
+ np.arange(input_length - (mask_length - 1)), num_masked_span, replace=False
+ )
+
+ # pick first sampled index that will serve as a dummy index to pad vector
+ # to ensure same dimension for all batches due to probabilistic rounding
+ # Picking first sample just pads those vectors twice.
+ if len(spec_aug_mask_idx) == 0:
+            # this case can only happen if `input_length` is strictly smaller than
+ # `sequence_length` in which case the last token has to be a padding
+ # token which we can use as a dummy mask id
+ dummy_mask_idx = sequence_length - 1
+ else:
+ dummy_mask_idx = spec_aug_mask_idx[0]
+
+ spec_aug_mask_idx = np.concatenate(
+ [spec_aug_mask_idx, np.ones(max_num_masked_span - num_masked_span, dtype=np.int32) * dummy_mask_idx]
+ )
+ spec_aug_mask_idxs.append(spec_aug_mask_idx)
+
+ spec_aug_mask_idxs = np.array(spec_aug_mask_idxs)
+
+ # expand masked indices to masked spans
+ spec_aug_mask_idxs = np.broadcast_to(
+ spec_aug_mask_idxs[:, :, None], (batch_size, max_num_masked_span, mask_length)
+ )
+ spec_aug_mask_idxs = spec_aug_mask_idxs.reshape(batch_size, max_num_masked_span * mask_length)
+
+ # add offset to the starting indexes so that indexes now create a span
+ offsets = np.arange(mask_length)[None, None, :]
+ offsets = np.broadcast_to(offsets, (batch_size, max_num_masked_span, mask_length)).reshape(
+ batch_size, max_num_masked_span * mask_length
+ )
+ spec_aug_mask_idxs = spec_aug_mask_idxs + offsets
+
+ # ensure that we cannot have indices larger than sequence_length
+ if spec_aug_mask_idxs.max() > sequence_length - 1:
+ spec_aug_mask_idxs[spec_aug_mask_idxs > sequence_length - 1] = sequence_length - 1
+
+ # scatter indices to mask
+ np.put_along_axis(spec_aug_mask, spec_aug_mask_idxs, 1, -1)
+
+ return spec_aug_mask
+
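+
+# Illustrative usage only (the shapes and probabilities are made up): sample
+# SpecAugment spans with `_compute_mask_indices` above for a batch of two sequences
+# of length 10, masking roughly mask_prob of the timesteps in spans of length 2.
+def _example_compute_mask_indices():
+    mask = _compute_mask_indices(shape=(2, 10), mask_prob=0.5, mask_length=2)
+    # `mask` is a boolean numpy array of shape (2, 10); True marks masked positions.
+    # Because spans may overlap, the actual masked fraction can fall below mask_prob.
+    return mask
+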
+
+# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2NoLayerNormConvLayer with Wav2Vec2->SpeechT5
+class SpeechT5NoLayerNormConvLayer(nn.Module):
+ def __init__(self, config, layer_id=0):
+ super().__init__()
+ self.in_conv_dim = config.conv_dim[layer_id - 1] if layer_id > 0 else 1
+ self.out_conv_dim = config.conv_dim[layer_id]
+
+ self.conv = nn.Conv1d(
+ self.in_conv_dim,
+ self.out_conv_dim,
+ kernel_size=config.conv_kernel[layer_id],
+ stride=config.conv_stride[layer_id],
+ bias=config.conv_bias,
+ )
+ self.activation = ACT2FN[config.feat_extract_activation]
+
+ def forward(self, hidden_states):
+ hidden_states = self.conv(hidden_states)
+ hidden_states = self.activation(hidden_states)
+ return hidden_states
+
+
+# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2LayerNormConvLayer with Wav2Vec2->SpeechT5
+class SpeechT5LayerNormConvLayer(nn.Module):
+ def __init__(self, config, layer_id=0):
+ super().__init__()
+ self.in_conv_dim = config.conv_dim[layer_id - 1] if layer_id > 0 else 1
+ self.out_conv_dim = config.conv_dim[layer_id]
+
+ self.conv = nn.Conv1d(
+ self.in_conv_dim,
+ self.out_conv_dim,
+ kernel_size=config.conv_kernel[layer_id],
+ stride=config.conv_stride[layer_id],
+ bias=config.conv_bias,
+ )
+ self.layer_norm = nn.LayerNorm(self.out_conv_dim, elementwise_affine=True)
+ self.activation = ACT2FN[config.feat_extract_activation]
+
+ def forward(self, hidden_states):
+ hidden_states = self.conv(hidden_states)
+
+ hidden_states = hidden_states.transpose(-2, -1)
+ hidden_states = self.layer_norm(hidden_states)
+ hidden_states = hidden_states.transpose(-2, -1)
+
+ hidden_states = self.activation(hidden_states)
+ return hidden_states
+
+
+# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2GroupNormConvLayer with Wav2Vec2->SpeechT5
+class SpeechT5GroupNormConvLayer(nn.Module):
+ def __init__(self, config, layer_id=0):
+ super().__init__()
+ self.in_conv_dim = config.conv_dim[layer_id - 1] if layer_id > 0 else 1
+ self.out_conv_dim = config.conv_dim[layer_id]
+
+ self.conv = nn.Conv1d(
+ self.in_conv_dim,
+ self.out_conv_dim,
+ kernel_size=config.conv_kernel[layer_id],
+ stride=config.conv_stride[layer_id],
+ bias=config.conv_bias,
+ )
+ self.activation = ACT2FN[config.feat_extract_activation]
+
+ self.layer_norm = nn.GroupNorm(num_groups=self.out_conv_dim, num_channels=self.out_conv_dim, affine=True)
+
+ def forward(self, hidden_states):
+ hidden_states = self.conv(hidden_states)
+ hidden_states = self.layer_norm(hidden_states)
+ hidden_states = self.activation(hidden_states)
+ return hidden_states
+
+
+# Copied from transformers.models.speech_to_text.modeling_speech_to_text.Speech2TextSinusoidalPositionalEmbedding with Speech2Text->SpeechT5
+class SpeechT5SinusoidalPositionalEmbedding(nn.Module):
+ """This module produces sinusoidal positional embeddings of any length."""
+
+ def __init__(self, num_positions: int, embedding_dim: int, padding_idx: Optional[int] = None):
+ super().__init__()
+ self.offset = 2
+ self.embedding_dim = embedding_dim
+ self.padding_idx = padding_idx
+ self.make_weights(num_positions + self.offset, embedding_dim, padding_idx)
+
+ def make_weights(self, num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None):
+ emb_weights = self.get_embedding(num_embeddings, embedding_dim, padding_idx)
+ if hasattr(self, "weights"):
+            # in forward, cast the weights to the dtype and device of the existing param
+ emb_weights = emb_weights.to(dtype=self.weights.dtype, device=self.weights.device)
+
+ self.weights = nn.Parameter(emb_weights)
+ self.weights.requires_grad = False
+ self.weights.detach_()
+
+ @staticmethod
+ def get_embedding(num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None):
+ """
+ Build sinusoidal embeddings. This matches the implementation in tensor2tensor, but differs slightly from the
+ description in Section 3.5 of "Attention Is All You Need".
+ """
+ half_dim = embedding_dim // 2
+ emb = math.log(10000) / (half_dim - 1)
+ emb = torch.exp(torch.arange(half_dim, dtype=torch.int64).float() * -emb)
+ emb = torch.arange(num_embeddings, dtype=torch.int64).float().unsqueeze(1) * emb.unsqueeze(0)
+ emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view(num_embeddings, -1)
+ if embedding_dim % 2 == 1:
+ # zero pad
+ emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1)
+ if padding_idx is not None:
+ emb[padding_idx, :] = 0
+ return emb.to(torch.get_default_dtype())
+
+ @torch.no_grad()
+ def forward(self, input_ids: torch.Tensor, past_key_values_length: int = 0):
+ bsz, seq_len = input_ids.size()
+ # Create the position ids from the input token ids. Any padded tokens remain padded.
+ position_ids = self.create_position_ids_from_input_ids(input_ids, self.padding_idx, past_key_values_length).to(
+ input_ids.device
+ )
+
+ # expand embeddings if needed
+ max_pos = self.padding_idx + 1 + seq_len
+ if max_pos > self.weights.size(0):
+ self.make_weights(max_pos + self.offset, self.embedding_dim, self.padding_idx)
+
+ return self.weights.index_select(0, position_ids.view(-1)).view(bsz, seq_len, -1).detach()
+
+ def create_position_ids_from_input_ids(
+ self, input_ids: torch.Tensor, padding_idx: int, past_key_values_length: Optional[int] = 0
+ ):
+ """
+ Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding
+ symbols are ignored. This is modified from fairseq's `utils.make_positions`.
+
+ Args:
+            input_ids: torch.Tensor of input token ids
+            padding_idx: int, index of the padding token
+            past_key_values_length: int, length of any cached key/value states
+        Returns: torch.Tensor of position ids
+ """
+ # The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA.
+ mask = input_ids.ne(padding_idx).int()
+ incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask
+ return incremental_indices.long() + padding_idx
+
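+
+# Illustrative sketch, not part of the library: build a small sinusoidal table with
+# the static helper above. The sizes are arbitrary example values.
+def _example_sinusoidal_embedding():
+    emb = SpeechT5SinusoidalPositionalEmbedding.get_embedding(
+        num_embeddings=6, embedding_dim=4, padding_idx=1
+    )
+    # emb has shape (6, 4); the first half of each row holds sines, the second half
+    # cosines, and row 1 (the padding_idx) is zeroed out.
+    return emb
+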
+
+# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2PositionalConvEmbedding with Wav2Vec2->SpeechT5
+class SpeechT5PositionalConvEmbedding(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.conv = nn.Conv1d(
+ config.hidden_size,
+ config.hidden_size,
+ kernel_size=config.num_conv_pos_embeddings,
+ padding=config.num_conv_pos_embeddings // 2,
+ groups=config.num_conv_pos_embedding_groups,
+ )
+
+ weight_norm = nn.utils.weight_norm
+ if hasattr(nn.utils.parametrizations, "weight_norm"):
+ weight_norm = nn.utils.parametrizations.weight_norm
+
+ if is_deepspeed_zero3_enabled():
+ import deepspeed
+
+ with deepspeed.zero.GatheredParameters(self.conv.weight, modifier_rank=0):
+ self.conv = weight_norm(self.conv, name="weight", dim=2)
+ deepspeed.zero.register_external_parameter(self, self.conv.weight_v)
+ deepspeed.zero.register_external_parameter(self, self.conv.weight_g)
+ else:
+ self.conv = weight_norm(self.conv, name="weight", dim=2)
+
+ self.padding = SpeechT5SamePadLayer(config.num_conv_pos_embeddings)
+ self.activation = ACT2FN[config.feat_extract_activation]
+
+ def forward(self, hidden_states):
+ hidden_states = hidden_states.transpose(1, 2)
+
+ hidden_states = self.conv(hidden_states)
+ hidden_states = self.padding(hidden_states)
+ hidden_states = self.activation(hidden_states)
+
+ hidden_states = hidden_states.transpose(1, 2)
+ return hidden_states
+
+
+class SpeechT5ScaledPositionalEncoding(nn.Module):
+ """
+ Scaled positional encoding, see §3.2 in https://arxiv.org/abs/1809.08895
+ """
+
+ def __init__(self, dropout, dim, max_len=5000):
+ pe = torch.zeros(max_len, dim)
+ position = torch.arange(0, max_len).unsqueeze(1)
+ div_term = torch.exp((torch.arange(0, dim, 2, dtype=torch.int64).float() * -(math.log(10000.0) / dim)))
+ pe[:, 0::2] = torch.sin(position.float() * div_term)
+ pe[:, 1::2] = torch.cos(position.float() * div_term)
+ pe = pe.unsqueeze(0)
+ super().__init__()
+ self.register_buffer("pe", pe, persistent=False)
+ self.dropout = nn.Dropout(p=dropout)
+ self.dim = dim
+ self.alpha = torch.nn.Parameter(torch.tensor(1.0))
+
+ def forward(self, emb):
+ emb = emb + self.alpha * self.pe[:, : emb.size(1)]
+ emb = self.dropout(emb)
+ return emb
+
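+
+# Illustrative usage (hypothetical sizes): the learnable scalar `alpha` scales a fixed
+# sinusoidal table that is added to the input embeddings and then passed through dropout.
+def _example_scaled_positional_encoding():
+    encode_positions = SpeechT5ScaledPositionalEncoding(dropout=0.0, dim=8, max_len=100)
+    emb = torch.zeros(2, 5, 8)  # (batch, time, dim)
+    return encode_positions(emb).shape  # torch.Size([2, 5, 8])
+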
+
+class SpeechT5RelativePositionalEncoding(torch.nn.Module):
+ def __init__(self, dim, max_length=1000):
+ super().__init__()
+ self.dim = dim
+ self.max_length = max_length
+ self.pe_k = torch.nn.Embedding(2 * max_length, dim)
+
+ def forward(self, hidden_states):
+ seq_len = hidden_states.shape[1]
+ pos_seq = torch.arange(0, seq_len).long().to(hidden_states.device)
+ pos_seq = pos_seq[:, None] - pos_seq[None, :]
+
+ pos_seq[pos_seq < -self.max_length] = -self.max_length
+ pos_seq[pos_seq >= self.max_length] = self.max_length - 1
+ pos_seq = pos_seq + self.max_length
+
+ return self.pe_k(pos_seq)
+
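+
+# Illustrative sketch with made-up sizes: relative offsets between positions are clamped
+# to [-max_length, max_length - 1] and shifted into [0, 2 * max_length) before the
+# embedding lookup, so the returned bias holds one vector per (query, key) pair.
+def _example_relative_positions():
+    rel_pos = SpeechT5RelativePositionalEncoding(dim=8, max_length=4)
+    hidden_states = torch.zeros(1, 6, 8)  # (batch, time, dim)
+    return rel_pos(hidden_states).shape  # torch.Size([6, 6, 8])
+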
+
+# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2SamePadLayer with Wav2Vec2->SpeechT5
+class SpeechT5SamePadLayer(nn.Module):
+ def __init__(self, num_conv_pos_embeddings):
+ super().__init__()
+ self.num_pad_remove = 1 if num_conv_pos_embeddings % 2 == 0 else 0
+
+ def forward(self, hidden_states):
+ if self.num_pad_remove > 0:
+ hidden_states = hidden_states[:, :, : -self.num_pad_remove]
+ return hidden_states
+
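+
+# Illustrative usage (hypothetical sizes): with an even number of positional conv
+# embeddings the preceding convolution yields one extra frame, which this layer trims.
+def _example_same_pad_layer():
+    layer = SpeechT5SamePadLayer(num_conv_pos_embeddings=128)
+    hidden_states = torch.zeros(1, 16, 50)  # (batch, channels, time)
+    return layer(hidden_states).shape  # torch.Size([1, 16, 49])
+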
+
+# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2FeatureEncoder with Wav2Vec2->SpeechT5
+class SpeechT5FeatureEncoder(nn.Module):
+ """Construct the features from raw audio waveform"""
+
+ def __init__(self, config):
+ super().__init__()
+
+ if config.feat_extract_norm == "group":
+ conv_layers = [SpeechT5GroupNormConvLayer(config, layer_id=0)] + [
+ SpeechT5NoLayerNormConvLayer(config, layer_id=i + 1) for i in range(config.num_feat_extract_layers - 1)
+ ]
+ elif config.feat_extract_norm == "layer":
+ conv_layers = [
+ SpeechT5LayerNormConvLayer(config, layer_id=i) for i in range(config.num_feat_extract_layers)
+ ]
+ else:
+ raise ValueError(
+ f"`config.feat_extract_norm` is {config.feat_extract_norm}, but has to be one of ['group', 'layer']"
+ )
+ self.conv_layers = nn.ModuleList(conv_layers)
+ self.gradient_checkpointing = False
+ self._requires_grad = True
+
+ def _freeze_parameters(self):
+ for param in self.parameters():
+ param.requires_grad = False
+ self._requires_grad = False
+
+ def forward(self, input_values):
+ hidden_states = input_values[:, None]
+
+ # make sure hidden_states require grad for gradient_checkpointing
+ if self._requires_grad and self.training:
+ hidden_states.requires_grad = True
+
+ for conv_layer in self.conv_layers:
+ if self._requires_grad and self.gradient_checkpointing and self.training:
+ hidden_states = self._gradient_checkpointing_func(
+ conv_layer.__call__,
+ hidden_states,
+ )
+ else:
+ hidden_states = conv_layer(hidden_states)
+
+ return hidden_states
+
+
+# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2FeatureProjection with Wav2Vec2->SpeechT5
+class SpeechT5FeatureProjection(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.layer_norm = nn.LayerNorm(config.conv_dim[-1], eps=config.layer_norm_eps)
+ self.projection = nn.Linear(config.conv_dim[-1], config.hidden_size)
+ self.dropout = nn.Dropout(config.feat_proj_dropout)
+
+ def forward(self, hidden_states):
+ # non-projected hidden states are needed for quantization
+ norm_hidden_states = self.layer_norm(hidden_states)
+ hidden_states = self.projection(norm_hidden_states)
+ hidden_states = self.dropout(hidden_states)
+ return hidden_states, norm_hidden_states
+
+
+class SpeechT5SpeechEncoderPrenet(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.config = config
+ self.feature_encoder = SpeechT5FeatureEncoder(config)
+ self.feature_projection = SpeechT5FeatureProjection(config)
+
+ # model only needs masking vector if mask prob is > 0.0
+ if config.mask_time_prob > 0.0 or config.mask_feature_prob > 0.0:
+ self.masked_spec_embed = nn.Parameter(torch.FloatTensor(config.hidden_size).uniform_())
+
+ self.pos_conv_embed = SpeechT5PositionalConvEmbedding(config)
+ self.pos_sinusoidal_embed = SpeechT5SinusoidalPositionalEmbedding(
+ config.max_speech_positions + config.pad_token_id + 1,
+ config.hidden_size,
+ config.pad_token_id,
+ )
+
+ def freeze_feature_encoder(self):
+ self.feature_encoder._freeze_parameters()
+
+ def forward(
+ self,
+ input_values: torch.Tensor,
+ attention_mask: Optional[torch.LongTensor] = None,
+ mask_time_indices: Optional[torch.FloatTensor] = None,
+ ):
+ extract_features = self.feature_encoder(input_values)
+ extract_features = extract_features.transpose(1, 2)
+
+ if attention_mask is not None:
+ # compute reduced attention_mask corresponding to feature vectors
+ attention_mask = self._get_feature_vector_attention_mask(
+ extract_features.shape[1],
+ attention_mask,
+ )
+
+ hidden_states, extract_features = self.feature_projection(extract_features)
+ hidden_states = self._mask_hidden_states(
+ hidden_states, mask_time_indices=mask_time_indices, attention_mask=attention_mask
+ )
+
+ positional_conv_embedding = self.pos_conv_embed(hidden_states)
+ hidden_states = hidden_states + positional_conv_embedding
+
+ if attention_mask is not None:
+ padding_mask = attention_mask.ne(1).long()
+ else:
+ padding_mask = torch.zeros(hidden_states.shape[:2], dtype=torch.long, device=hidden_states.device)
+
+ positional_sinusoidal_embeddings = self.pos_sinusoidal_embed(padding_mask)
+ hidden_states = hidden_states + positional_sinusoidal_embeddings
+
+ return hidden_states, attention_mask
+
+ # Copied from transformers.models.unispeech.modeling_unispeech.UniSpeechPreTrainedModel._get_feature_vector_attention_mask
+ def _get_feature_vector_attention_mask(self, feature_vector_length: int, attention_mask: torch.LongTensor):
+ # Effectively attention_mask.sum(-1), but not inplace to be able to run
+        # in inference mode.
+ non_padded_lengths = attention_mask.cumsum(dim=-1)[:, -1]
+ output_lengths = self._get_feat_extract_output_lengths(non_padded_lengths).to(torch.long)
+ batch_size = attention_mask.shape[0]
+
+ attention_mask = torch.zeros(
+ (batch_size, feature_vector_length), dtype=attention_mask.dtype, device=attention_mask.device
+ )
+        # these two operations make sure that all values before the output length indices are attended to
+ attention_mask[(torch.arange(attention_mask.shape[0], device=attention_mask.device), output_lengths - 1)] = 1
+ attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()
+ return attention_mask
+
+ # Copied from transformers.models.unispeech.modeling_unispeech.UniSpeechPreTrainedModel._get_feat_extract_output_lengths
+ def _get_feat_extract_output_lengths(self, input_lengths: Union[torch.LongTensor, int]):
+ """
+ Computes the output length of the convolutional layers
+ """
+
+ def _conv_out_length(input_length, kernel_size, stride):
+ # 1D convolutional layer output length formula taken
+ # from https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html
+ return torch.div(input_length - kernel_size, stride, rounding_mode="floor") + 1
+
+ for kernel_size, stride in zip(self.config.conv_kernel, self.config.conv_stride):
+ input_lengths = _conv_out_length(input_lengths, kernel_size, stride)
+
+ return input_lengths
+
+ # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2Model._mask_hidden_states
+ def _mask_hidden_states(
+ self,
+ hidden_states: torch.FloatTensor,
+ mask_time_indices: Optional[torch.FloatTensor] = None,
+ attention_mask: Optional[torch.LongTensor] = None,
+ ):
+ """
+ Masks extracted features along time axis and/or along feature axis according to
+ [SpecAugment](https://arxiv.org/abs/1904.08779).
+ """
+
+ # `config.apply_spec_augment` can set masking to False
+ if not getattr(self.config, "apply_spec_augment", True):
+ return hidden_states
+
+ # generate indices & apply SpecAugment along time axis
+ batch_size, sequence_length, hidden_size = hidden_states.size()
+
+ if mask_time_indices is not None:
+ # apply SpecAugment along time axis with given mask_time_indices
+ hidden_states[mask_time_indices] = self.masked_spec_embed.to(hidden_states.dtype)
+ elif self.config.mask_time_prob > 0 and self.training:
+ mask_time_indices = _compute_mask_indices(
+ (batch_size, sequence_length),
+ mask_prob=self.config.mask_time_prob,
+ mask_length=self.config.mask_time_length,
+ attention_mask=attention_mask,
+ min_masks=self.config.mask_time_min_masks,
+ )
+ mask_time_indices = torch.tensor(mask_time_indices, device=hidden_states.device, dtype=torch.bool)
+ hidden_states[mask_time_indices] = self.masked_spec_embed.to(hidden_states.dtype)
+
+ if self.config.mask_feature_prob > 0 and self.training:
+ # generate indices & apply SpecAugment along feature axis
+ mask_feature_indices = _compute_mask_indices(
+ (batch_size, hidden_size),
+ mask_prob=self.config.mask_feature_prob,
+ mask_length=self.config.mask_feature_length,
+ min_masks=self.config.mask_feature_min_masks,
+ )
+ mask_feature_indices = torch.tensor(mask_feature_indices, device=hidden_states.device, dtype=torch.bool)
+ mask_feature_indices = mask_feature_indices[:, None].expand(-1, sequence_length, -1)
+ hidden_states[mask_feature_indices] = 0
+
+ return hidden_states
+
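+
+# Worked example of the convolutional output-length formula used in
+# `_get_feat_extract_output_lengths` above. The kernel/stride values are assumptions
+# (a typical wav2vec2-style first conv layer), not read from any config.
+def _example_conv_output_length():
+    input_length = torch.tensor(16000)  # e.g. one second of 16 kHz audio
+    kernel_size, stride = 10, 5
+    # floor((16000 - 10) / 5) + 1 = 3199 frames after this layer
+    return torch.div(input_length - kernel_size, stride, rounding_mode="floor") + 1
+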
+
+class SpeechT5SpeechDecoderPrenet(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.config = config
+
+ self.layers = nn.ModuleList(
+ [
+ nn.Linear(
+ config.num_mel_bins if i == 0 else config.speech_decoder_prenet_units,
+ config.speech_decoder_prenet_units,
+ )
+ for i in range(config.speech_decoder_prenet_layers)
+ ]
+ )
+
+ self.final_layer = nn.Linear(config.speech_decoder_prenet_units, config.hidden_size)
+ self.encode_positions = SpeechT5ScaledPositionalEncoding(
+ config.positional_dropout,
+ config.hidden_size,
+ config.max_speech_positions,
+ )
+ self.speaker_embeds_layer = nn.Linear(config.speaker_embedding_dim + config.hidden_size, config.hidden_size)
+
+ def _consistent_dropout(self, inputs_embeds, p):
+ mask = torch.bernoulli(inputs_embeds[0], p=p)
+ all_masks = mask.unsqueeze(0).repeat(inputs_embeds.size(0), 1, 1)
+ return torch.where(all_masks == 1, inputs_embeds, 0) * 1 / (1 - p)
+
+ def forward(
+ self,
+ input_values: torch.Tensor,
+ speaker_embeddings: Optional[torch.Tensor] = None,
+ ):
+ # Dropout is always applied, even when evaluating. See §2.2 in https://arxiv.org/abs/1712.05884.
+
+ inputs_embeds = input_values
+ for layer in self.layers:
+ inputs_embeds = nn.functional.relu(layer(inputs_embeds))
+ inputs_embeds = self._consistent_dropout(inputs_embeds, self.config.speech_decoder_prenet_dropout)
+
+ inputs_embeds = self.final_layer(inputs_embeds)
+ inputs_embeds = self.encode_positions(inputs_embeds)
+
+ if speaker_embeddings is not None:
+ speaker_embeddings = nn.functional.normalize(speaker_embeddings)
+ speaker_embeddings = speaker_embeddings.unsqueeze(1).expand(-1, inputs_embeds.size(1), -1)
+ inputs_embeds = torch.cat([inputs_embeds, speaker_embeddings], dim=-1)
+ inputs_embeds = nn.functional.relu(self.speaker_embeds_layer(inputs_embeds))
+
+ return inputs_embeds
+
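+
+# Illustrative sketch of the "consistent" dropout used by the decoder prenet above:
+# one Bernoulli mask is drawn from the first batch element and reused for the whole
+# batch, and it stays active at inference time as well. Sizes below are hypothetical.
+def _example_consistent_prenet_dropout():
+    p = 0.5
+    inputs_embeds = torch.ones(3, 4, 2)  # (batch, time, features)
+    mask = torch.bernoulli(inputs_embeds[0], p=p)
+    all_masks = mask.unsqueeze(0).repeat(inputs_embeds.size(0), 1, 1)
+    # positions where the shared mask is 0 are zeroed; the rest are rescaled by 1 / (1 - p)
+    return torch.where(all_masks == 1, inputs_embeds, 0) * 1 / (1 - p)
+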
+
+class SpeechT5BatchNormConvLayer(nn.Module):
+ def __init__(self, config, layer_id=0):
+ super().__init__()
+
+ if layer_id == 0:
+ in_conv_dim = config.num_mel_bins
+ else:
+ in_conv_dim = config.speech_decoder_postnet_units
+
+ if layer_id == config.speech_decoder_postnet_layers - 1:
+ out_conv_dim = config.num_mel_bins
+ else:
+ out_conv_dim = config.speech_decoder_postnet_units
+
+ self.conv = nn.Conv1d(
+ in_conv_dim,
+ out_conv_dim,
+ kernel_size=config.speech_decoder_postnet_kernel,
+ stride=1,
+ padding=(config.speech_decoder_postnet_kernel - 1) // 2,
+ bias=False,
+ )
+ self.batch_norm = nn.BatchNorm1d(out_conv_dim)
+
+ if layer_id < config.speech_decoder_postnet_layers - 1:
+ self.activation = nn.Tanh()
+ else:
+ self.activation = None
+
+ self.dropout = nn.Dropout(config.speech_decoder_postnet_dropout)
+
+ def forward(self, hidden_states):
+ hidden_states = self.conv(hidden_states)
+ hidden_states = self.batch_norm(hidden_states)
+ if self.activation is not None:
+ hidden_states = self.activation(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+ return hidden_states
+
+
+class SpeechT5SpeechDecoderPostnet(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.config = config
+
+ self.feat_out = nn.Linear(config.hidden_size, config.num_mel_bins * config.reduction_factor)
+ self.prob_out = nn.Linear(config.hidden_size, config.reduction_factor)
+
+ self.layers = nn.ModuleList(
+ [SpeechT5BatchNormConvLayer(config, i) for i in range(config.speech_decoder_postnet_layers)]
+ )
+
+ def forward(self, hidden_states: torch.Tensor):
+ outputs_before_postnet = self.feat_out(hidden_states).view(hidden_states.size(0), -1, self.config.num_mel_bins)
+ outputs_after_postnet = self.postnet(outputs_before_postnet)
+ logits = self.prob_out(hidden_states).view(hidden_states.size(0), -1)
+ return outputs_before_postnet, outputs_after_postnet, logits
+
+ def postnet(self, hidden_states: torch.Tensor):
+ layer_output = hidden_states.transpose(1, 2)
+ for layer in self.layers:
+ layer_output = layer(layer_output)
+ return hidden_states + layer_output.transpose(1, 2)
+
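+
+# Illustrative sketch with hypothetical shapes: the postnet's `feat_out` projection
+# emits num_mel_bins * reduction_factor values per decoder step, which the
+# `.view(batch, -1, num_mel_bins)` call above unfolds into reduction_factor frames each.
+def _example_reduction_factor_reshape():
+    batch, timesteps, num_mel_bins, reduction_factor = 2, 3, 80, 2
+    feat_out = torch.randn(batch, timesteps, num_mel_bins * reduction_factor)
+    spectrogram = feat_out.view(batch, -1, num_mel_bins)
+    return spectrogram.shape  # torch.Size([2, 6, 80])
+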
+
+class SpeechT5TextEncoderPrenet(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.config = config
+ self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, config.pad_token_id)
+ self.encode_positions = SpeechT5ScaledPositionalEncoding(
+ config.positional_dropout,
+ config.hidden_size,
+ config.max_text_positions,
+ )
+
+ def get_input_embeddings(self):
+ return self.embed_tokens
+
+ def set_input_embeddings(self, value):
+ self.embed_tokens = value
+
+ def forward(self, input_ids: torch.Tensor):
+ inputs_embeds = self.embed_tokens(input_ids)
+ inputs_embeds = self.encode_positions(inputs_embeds)
+ return inputs_embeds
+
+
+class SpeechT5TextDecoderPrenet(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.config = config
+ self.dropout = nn.Dropout(config.positional_dropout)
+ self.embed_scale = math.sqrt(config.hidden_size) if config.scale_embedding else 1.0
+
+ self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, config.pad_token_id)
+
+ self.embed_positions = SpeechT5SinusoidalPositionalEmbedding(
+ config.max_text_positions + config.pad_token_id + 1,
+ config.hidden_size,
+ config.pad_token_id,
+ )
+
+ def get_input_embeddings(self):
+ return self.embed_tokens
+
+ def set_input_embeddings(self, value):
+ self.embed_tokens = value
+
+ def forward(
+ self,
+ input_ids: torch.Tensor,
+ attention_mask: Optional[torch.LongTensor] = None,
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
+ ):
+ if input_ids is not None:
+ input_shape = input_ids.size()
+ input_ids = input_ids.view(-1, input_shape[-1])
+ else:
+ raise ValueError("You have to specify `decoder_input_ids`")
+
+ past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
+ positions = self.embed_positions(input_ids, past_key_values_length)
+
+ inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
+ inputs_embeds += positions
+ inputs_embeds = self.dropout(inputs_embeds)
+
+ return inputs_embeds, attention_mask
+
+
+class SpeechT5TextDecoderPostnet(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.config = config
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
+
+ def forward(self, hidden_states: torch.Tensor):
+ return self.lm_head(hidden_states)
+
+ def get_output_embeddings(self):
+ return self.lm_head
+
+ def set_output_embeddings(self, new_embeddings):
+ self.lm_head = new_embeddings
+
+
+class SpeechT5Attention(nn.Module):
+ """
+ Multi-headed attention from 'Attention Is All You Need' paper with relative position bias (see
+ https://aclanthology.org/N18-2074.pdf)
+ """
+
+ def __init__(
+ self,
+ embed_dim: int,
+ num_heads: int,
+ dropout: float = 0.0,
+ is_decoder: bool = False,
+ bias: bool = True,
+ ):
+ super().__init__()
+ self.embed_dim = embed_dim
+ self.num_heads = num_heads
+ self.dropout = dropout
+ self.head_dim = embed_dim // num_heads
+
+ if (self.head_dim * num_heads) != self.embed_dim:
+ raise ValueError(
+ f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
+ f" and `num_heads`: {num_heads})."
+ )
+ self.scaling = self.head_dim**-0.5
+ self.is_decoder = is_decoder
+
+ self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
+ self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
+ self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
+ self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
+
+ def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
+ return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ key_value_states: Optional[torch.Tensor] = None,
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ layer_head_mask: Optional[torch.Tensor] = None,
+ position_bias: Optional[torch.Tensor] = None,
+ output_attentions: bool = False,
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
+ """Input shape: Batch x Time x Channel"""
+
+ # if key_value_states are provided this layer is used as a cross-attention layer
+ # for the decoder
+ is_cross_attention = key_value_states is not None
+
+ bsz, tgt_len, _ = hidden_states.size()
+
+ # get query proj
+ query_states = self.q_proj(hidden_states) * self.scaling
+ # get key, value proj
+ if is_cross_attention and past_key_value is not None:
+ # reuse k,v, cross_attentions
+ key_states = past_key_value[0]
+ value_states = past_key_value[1]
+ elif is_cross_attention:
+ # cross_attentions
+ key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
+ value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
+ elif past_key_value is not None:
+ # reuse k, v, self_attention
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
+ key_states = torch.cat([past_key_value[0], key_states], dim=2)
+ value_states = torch.cat([past_key_value[1], value_states], dim=2)
+ else:
+ # self_attention
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
+
+ if self.is_decoder:
+ # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
+ # Further calls to cross_attention layer can then reuse all cross-attention
+ # key/value_states (first "if" case)
+ # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
+ past_key_value = (key_states, value_states)
+
+ proj_shape = (bsz * self.num_heads, -1, self.head_dim)
+ query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
+ key_states = key_states.view(*proj_shape)
+ value_states = value_states.view(*proj_shape)
+
+ src_len = key_states.size(1)
+ attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
+
+ if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
+ raise ValueError(
+ f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
+ f" {attn_weights.size()}"
+ )
+
+ # relative attention bias
+ if position_bias is not None:
+ reshape_q = query_states.contiguous().view(bsz * self.num_heads, -1, self.head_dim).transpose(0, 1)
+ rel_pos_bias = torch.matmul(reshape_q, position_bias.transpose(-2, -1))
+ rel_pos_bias = rel_pos_bias.transpose(0, 1).view(
+ bsz * self.num_heads, position_bias.size(0), position_bias.size(1)
+ )
+ attn_weights += rel_pos_bias
+
+ if attention_mask is not None:
+ if attention_mask.size() != (bsz, 1, tgt_len, src_len):
+ raise ValueError(
+ f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
+ )
+ attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
+ attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
+
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1)
+
+ if layer_head_mask is not None:
+ if layer_head_mask.size() != (self.num_heads,):
+ raise ValueError(
+ f"Head mask for a single layer should be of size {(self.num_heads,)}, but is"
+ f" {layer_head_mask.size()}"
+ )
+ attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
+ attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
+
+ if output_attentions:
+            # this operation is a bit awkward, but it's required to
+            # make sure that attn_weights keeps its gradient.
+            # In order to do so, attn_weights has to be reshaped
+            # twice and reused in the following computation
+ attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
+ attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
+ else:
+ attn_weights_reshaped = None
+
+ attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
+
+ attn_output = torch.bmm(attn_probs, value_states)
+
+ if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
+ raise ValueError(
+ f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is"
+ f" {attn_output.size()}"
+ )
+
+ attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
+ attn_output = attn_output.transpose(1, 2)
+
+ # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be
+        # partitioned across GPUs when using tensor-parallelism.
+ attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim)
+
+ attn_output = self.out_proj(attn_output)
+
+ return attn_output, attn_weights_reshaped, past_key_value
+
+
+class SpeechT5FeedForward(nn.Module):
+ def __init__(self, config, intermediate_size):
+ super().__init__()
+ self.intermediate_dropout = nn.Dropout(config.activation_dropout)
+
+ self.intermediate_dense = nn.Linear(config.hidden_size, intermediate_size)
+ if isinstance(config.hidden_act, str):
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
+ else:
+ self.intermediate_act_fn = config.hidden_act
+
+ self.output_dense = nn.Linear(intermediate_size, config.hidden_size)
+ self.output_dropout = nn.Dropout(config.hidden_dropout)
+
+ def forward(self, hidden_states):
+ hidden_states = self.intermediate_dense(hidden_states)
+ hidden_states = self.intermediate_act_fn(hidden_states)
+ hidden_states = self.intermediate_dropout(hidden_states)
+
+ hidden_states = self.output_dense(hidden_states)
+ hidden_states = self.output_dropout(hidden_states)
+ return hidden_states
+
+
+class SpeechT5EncoderLayer(nn.Module):
+ def __init__(self, config: SpeechT5Config):
+ super().__init__()
+ self.attention = SpeechT5Attention(
+ embed_dim=config.hidden_size,
+ num_heads=config.encoder_attention_heads,
+ dropout=config.attention_dropout,
+ is_decoder=False,
+ )
+ self.dropout = nn.Dropout(config.hidden_dropout)
+ self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+ self.feed_forward = SpeechT5FeedForward(config, config.encoder_ffn_dim)
+ self.final_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.Tensor] = None,
+ layer_head_mask: Optional[torch.Tensor] = None,
+ position_bias: Optional[torch.Tensor] = None,
+ output_attentions: bool = False,
+ ):
+ """
+ Args:
+ hidden_states (`torch.FloatTensor`):
+ input to the layer of shape `(batch, seq_len, hidden_size)`
+ attention_mask (`torch.FloatTensor`):
+ attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very
+ large negative values.
+ layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
+ `(config.encoder_attention_heads,)`.
+ position_bias (`torch.FloatTensor`):
+ relative position embeddings of size `(seq_len, seq_len, hidden_size // encoder_attention_heads)`
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ """
+ residual = hidden_states
+ hidden_states, attn_weights, _ = self.attention(
+ hidden_states=hidden_states,
+ attention_mask=attention_mask,
+ layer_head_mask=layer_head_mask,
+ position_bias=position_bias,
+ output_attentions=output_attentions,
+ )
+
+ hidden_states = self.dropout(hidden_states)
+ hidden_states = residual + hidden_states
+
+ hidden_states = self.layer_norm(hidden_states)
+ hidden_states = hidden_states + self.feed_forward(hidden_states)
+ hidden_states = self.final_layer_norm(hidden_states)
+
+ outputs = (hidden_states,)
+
+ if output_attentions:
+ outputs += (attn_weights,)
+
+ return outputs
+
+
+class SpeechT5DecoderLayer(nn.Module):
+ def __init__(self, config: SpeechT5Config):
+ super().__init__()
+ self.self_attn = SpeechT5Attention(
+ embed_dim=config.hidden_size,
+ num_heads=config.decoder_attention_heads,
+ dropout=config.attention_dropout,
+ is_decoder=True,
+ )
+ self.dropout = nn.Dropout(config.hidden_dropout)
+ self.self_attn_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+
+ self.encoder_attn = SpeechT5Attention(
+ config.hidden_size,
+ config.decoder_attention_heads,
+ dropout=config.attention_dropout,
+ is_decoder=True,
+ )
+ self.encoder_attn_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+
+ self.feed_forward = SpeechT5FeedForward(config, config.decoder_ffn_dim)
+ self.final_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.Tensor] = None,
+ encoder_hidden_states: Optional[torch.Tensor] = None,
+ encoder_attention_mask: Optional[torch.Tensor] = None,
+ layer_head_mask: Optional[torch.Tensor] = None,
+ cross_attn_layer_head_mask: Optional[torch.Tensor] = None,
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
+ output_attentions: Optional[bool] = False,
+ use_cache: Optional[bool] = True,
+ ):
+ """
+ Args:
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, hidden_size)`
+ attention_mask (`torch.FloatTensor`): attention mask of size
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
+ encoder_hidden_states (`torch.FloatTensor`):
+ cross attention input to the layer of shape `(batch, seq_len, hidden_size)`
+ encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
+ layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
+ `(encoder_attention_heads,)`.
+ cross_attn_layer_head_mask (`torch.FloatTensor`): mask for cross-attention heads in a given layer of
+ size `(decoder_attention_heads,)`.
+ past_key_value (`Tuple(torch.FloatTensor)`): cached past key and value projection states
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ """
+ residual = hidden_states
+
+ # Self Attention
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
+ # add present self-attn cache to positions 1,2 of present_key_value tuple
+ hidden_states, self_attn_weights, present_key_value = self.self_attn(
+ hidden_states=hidden_states,
+ past_key_value=self_attn_past_key_value,
+ attention_mask=attention_mask,
+ layer_head_mask=layer_head_mask,
+ output_attentions=output_attentions,
+ )
+ hidden_states = self.dropout(hidden_states)
+ hidden_states = residual + hidden_states
+ hidden_states = self.self_attn_layer_norm(hidden_states)
+
+ # Cross-Attention Block
+ cross_attn_present_key_value = None
+ cross_attn_weights = None
+ if encoder_hidden_states is not None:
+ residual = hidden_states
+
+ # cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple
+ cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
+ hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn(
+ hidden_states=hidden_states,
+ key_value_states=encoder_hidden_states,
+ attention_mask=encoder_attention_mask,
+ layer_head_mask=cross_attn_layer_head_mask,
+ past_key_value=cross_attn_past_key_value,
+ output_attentions=output_attentions,
+ )
+ hidden_states = self.dropout(hidden_states)
+ hidden_states = residual + hidden_states
+ hidden_states = self.encoder_attn_layer_norm(hidden_states)
+
+ # add cross-attn to positions 3,4 of present_key_value tuple
+ present_key_value = present_key_value + cross_attn_present_key_value
+
+ # Fully Connected
+ hidden_states = hidden_states + self.feed_forward(hidden_states)
+ hidden_states = self.final_layer_norm(hidden_states)
+
+ outputs = (hidden_states,)
+
+ if output_attentions:
+ outputs += (self_attn_weights, cross_attn_weights)
+
+ if use_cache:
+ outputs += (present_key_value,)
+
+ return outputs
+
+
+class SpeechT5PreTrainedModel(PreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = SpeechT5Config
+ base_model_prefix = "speecht5"
+ main_input_name = "input_values"
+ supports_gradient_checkpointing = True
+
+ def _init_weights(self, module):
+ """Initialize the weights"""
+ if isinstance(module, SpeechT5PositionalConvEmbedding):
+ nn.init.normal_(
+ module.conv.weight,
+ mean=0,
+ std=2 * math.sqrt(1 / (module.conv.kernel_size[0] * module.conv.in_channels)),
+ )
+ nn.init.constant_(module.conv.bias, 0)
+ elif isinstance(module, SpeechT5FeatureProjection):
+ k = math.sqrt(1 / module.projection.in_features)
+ nn.init.uniform_(module.projection.weight, a=-k, b=k)
+ nn.init.uniform_(module.projection.bias, a=-k, b=k)
+ elif isinstance(module, nn.Linear):
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+ if module.bias is not None:
+ module.bias.data.zero_()
+ elif isinstance(module, (nn.LayerNorm, nn.GroupNorm)):
+ module.bias.data.zero_()
+ module.weight.data.fill_(1.0)
+ elif isinstance(module, nn.Conv1d):
+ nn.init.kaiming_normal_(module.weight)
+ if module.bias is not None:
+ k = math.sqrt(module.groups / (module.in_channels * module.kernel_size[0]))
+ nn.init.uniform_(module.bias, a=-k, b=k)
+ elif isinstance(module, nn.Embedding):
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+ if module.padding_idx is not None:
+ module.weight.data[module.padding_idx].zero_()
+
+
+class SpeechT5Encoder(SpeechT5PreTrainedModel):
+ """
+ Transformer encoder consisting of *config.encoder_layers* layers. Each layer is a [`SpeechT5EncoderLayer`].
+ """
+
+ def __init__(self, config: SpeechT5Config):
+ super().__init__(config)
+ self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+ self.dropout = nn.Dropout(config.hidden_dropout)
+ self.layerdrop = config.encoder_layerdrop
+
+ self.layers = nn.ModuleList([SpeechT5EncoderLayer(config) for _ in range(config.encoder_layers)])
+
+ self.embed_positions = SpeechT5RelativePositionalEncoding(
+ config.hidden_size // config.encoder_attention_heads, config.encoder_max_relative_position
+ )
+
+ self.gradient_checkpointing = False
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def forward(
+ self,
+ hidden_states: torch.FloatTensor,
+ attention_mask: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, BaseModelOutput]:
+ """
+ Args:
+ hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, feature_size)`):
+ Features extracted from the speech or text input by the encoder prenet.
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing convolution and attention on padding token indices. Mask values selected in
+ `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
+ for more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+ """
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ # expand attention_mask
+ if attention_mask is not None:
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
+ attention_mask = _prepare_4d_attention_mask(attention_mask, hidden_states.dtype)
+
+ hidden_states = self.layer_norm(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+
+ position_bias = self.embed_positions(hidden_states)
+
+ deepspeed_zero3_is_enabled = is_deepspeed_zero3_enabled()
+
+ all_hidden_states = () if output_hidden_states else None
+ all_self_attentions = () if output_attentions else None
+
+ # check if head_mask has a correct number of layers specified if desired
+ if head_mask is not None:
+ if head_mask.size()[0] != len(self.layers):
+ raise ValueError(
+ f"The head_mask should be specified for {len(self.layers)} layers, but it is for"
+ f" {head_mask.size()[0]}."
+ )
+
+ for idx, encoder_layer in enumerate(self.layers):
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
+ skip_the_layer = False
+ if self.training:
+ dropout_probability = torch.rand([])
+ skip_the_layer = dropout_probability < self.layerdrop
+
+ if not skip_the_layer or deepspeed_zero3_is_enabled:
+ # under deepspeed zero3 all gpus must run in sync
+ if self.gradient_checkpointing and self.training:
+ layer_outputs = self._gradient_checkpointing_func(
+ encoder_layer.__call__,
+ hidden_states,
+ attention_mask,
+ (head_mask[idx] if head_mask is not None else None),
+ position_bias,
+ output_attentions,
+ )
+ else:
+ layer_outputs = encoder_layer(
+ hidden_states,
+ attention_mask=attention_mask,
+ position_bias=position_bias,
+ layer_head_mask=(head_mask[idx] if head_mask is not None else None),
+ output_attentions=output_attentions,
+ )
+ hidden_states = layer_outputs[0]
+
+ if skip_the_layer:
+ layer_outputs = (None, None)
+
+ if output_attentions:
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
+
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ if not return_dict:
+ return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
+
+ return BaseModelOutput(
+ last_hidden_state=hidden_states,
+ hidden_states=all_hidden_states,
+ attentions=all_self_attentions,
+ )
+
+
+class SpeechT5EncoderWithSpeechPrenet(SpeechT5PreTrainedModel):
+ """
+ Wrapper around SpeechT5Encoder that applies SpeechT5SpeechEncoderPrenet to convert the audio waveform data to
+ hidden features.
+ """
+
+ def __init__(self, config: SpeechT5Config):
+ super().__init__(config)
+ self.prenet = SpeechT5SpeechEncoderPrenet(config)
+ self.wrapped_encoder = SpeechT5Encoder(config)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def forward(
+ self,
+ input_values: torch.FloatTensor,
+ attention_mask: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, BaseModelOutput]:
+ hidden_states, attention_mask = self.prenet(input_values, attention_mask)
+
+ outputs = self.wrapped_encoder(
+ hidden_states=hidden_states,
+ attention_mask=attention_mask,
+ head_mask=head_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ return outputs
+
+
+class SpeechT5EncoderWithTextPrenet(SpeechT5PreTrainedModel):
+ """
+ Wrapper around SpeechT5Encoder that applies SpeechT5TextEncoderPrenet to convert the input_ids to hidden features.
+ """
+
+ def __init__(self, config: SpeechT5Config):
+ super().__init__(config)
+ self.prenet = SpeechT5TextEncoderPrenet(config)
+ self.wrapped_encoder = SpeechT5Encoder(config)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.prenet.get_input_embeddings()
+
+ def set_input_embeddings(self, value):
+ self.prenet.set_input_embeddings(value)
+
+ def forward(
+ self,
+ input_values: torch.FloatTensor,
+ attention_mask: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, BaseModelOutput]:
+ hidden_states = self.prenet(input_values)
+
+ outputs = self.wrapped_encoder(
+ hidden_states=hidden_states,
+ attention_mask=attention_mask,
+ head_mask=head_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ return outputs
+
+
+class SpeechT5EncoderWithoutPrenet(SpeechT5PreTrainedModel):
+ """
+ This wrapper class is a helper class to correctly load pretrained checkpoints when used in combination with
+ [`SpeechT5Model`].
+ """
+
+ def __init__(self, config: SpeechT5Config):
+ super().__init__(config)
+ self.wrapped_encoder = SpeechT5Encoder(config)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def forward(
+ self,
+ input_values: torch.FloatTensor,
+ attention_mask: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, BaseModelOutput]:
+ return self.wrapped_encoder(
+ hidden_states=input_values,
+ attention_mask=attention_mask,
+ head_mask=head_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+
+class SpeechT5Decoder(SpeechT5PreTrainedModel):
+ """
+    Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`SpeechT5DecoderLayer`].
+ """
+
+ def __init__(self, config: SpeechT5Config):
+ super().__init__(config)
+ self.layerdrop = config.decoder_layerdrop
+
+ self.layers = nn.ModuleList([SpeechT5DecoderLayer(config) for _ in range(config.decoder_layers)])
+
+ self.gradient_checkpointing = False
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def forward(
+ self,
+ hidden_states: Optional[torch.FloatTensor] = None,
+ attention_mask: Optional[torch.LongTensor] = None,
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
+ encoder_attention_mask: Optional[torch.LongTensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, BaseModelOutputWithPastAndCrossAttentions]:
+ r"""
+ Args:
+ hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, feature_size)`):
+ Features extracted from the speech or text input by the decoder prenet.
+ attention_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
+ of the decoder.
+ encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*):
+ Mask to avoid performing cross-attention on padding token indices of encoder input_ids. Mask values
+ selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in
+ `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
+ shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of
+ shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
+
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and in the
+ cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
+
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
+ that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
+ all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
+ This is useful if you want more control over how to convert `input_ids` indices into associated vectors
+ than the model's internal embedding lookup matrix.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
+ for more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+ """
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ input_shape = hidden_states.size()[:-1]
+
+ past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
+
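+ # build the 4D causal self-attention mask, combining any provided padding mask with a look-ahead mask offset by the cached key/value length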
+ attention_mask = _prepare_4d_causal_attention_mask(
+ attention_mask, input_shape, hidden_states, past_key_values_length
+ )
+
+ # expand encoder attention mask
+ if encoder_hidden_states is not None and encoder_attention_mask is not None:
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
+ encoder_attention_mask = _prepare_4d_attention_mask(
+ encoder_attention_mask, hidden_states.dtype, tgt_len=input_shape[-1]
+ )
+
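+ # under DeepSpeed ZeRO-3 every rank must execute every layer, so LayerDrop skipping is bypassed in that case (see the loop below)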
+ deepspeed_zero3_is_enabled = is_deepspeed_zero3_enabled()
+
+ if self.gradient_checkpointing and self.training:
+ if use_cache:
+ logger.warning_once(
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
+ )
+ use_cache = False
+
+ # decoder layers
+ all_hidden_states = () if output_hidden_states else None
+ all_self_attentions = () if output_attentions else None
+ all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None
+ next_decoder_cache = () if use_cache else None
+
+ # check if head_mask/cross_attn_head_mask has a correct number of layers specified if desired
+ for attn_mask, mask_name in zip([head_mask, cross_attn_head_mask], ["head_mask", "cross_attn_head_mask"]):
+ if attn_mask is not None:
+ if attn_mask.size()[0] != len(self.layers):
+ raise ValueError(
+ f"The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for"
+ f" {attn_mask.size()[0]}."
+ )
+
+ for idx, decoder_layer in enumerate(self.layers):
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
+ skip_the_layer = False
+ if self.training:
+ dropout_probability = torch.rand([])
+ skip_the_layer = dropout_probability < self.layerdrop
+ if skip_the_layer and not deepspeed_zero3_is_enabled:
+ continue
+
+ past_key_value = past_key_values[idx] if past_key_values is not None else None
+
+ if self.gradient_checkpointing and self.training:
+ layer_outputs = self._gradient_checkpointing_func(
+ decoder_layer.__call__,
+ hidden_states,
+ attention_mask,
+ encoder_hidden_states,
+ encoder_attention_mask,
+ head_mask[idx] if head_mask is not None else None,
+ cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None,
+ None,
+ output_attentions,
+ use_cache,
+ )
+ else:
+ layer_outputs = decoder_layer(
+ hidden_states,
+ attention_mask=attention_mask,
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=encoder_attention_mask,
+ layer_head_mask=(head_mask[idx] if head_mask is not None else None),
+ cross_attn_layer_head_mask=(
+ cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None
+ ),
+ past_key_value=past_key_value,
+ output_attentions=output_attentions,
+ use_cache=use_cache,
+ )
+ hidden_states = layer_outputs[0]
+
+ if use_cache:
+ next_decoder_cache += (layer_outputs[3 if output_attentions else 1],)
+
+ if output_attentions:
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
+
+ if encoder_hidden_states is not None:
+ all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
+
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ next_cache = next_decoder_cache if use_cache else None
+ if not return_dict:
+ return tuple(
+ v
+ for v in [hidden_states, next_cache, all_hidden_states, all_self_attentions, all_cross_attentions]
+ if v is not None
+ )
+
+ return BaseModelOutputWithPastAndCrossAttentions(
+ last_hidden_state=hidden_states,
+ past_key_values=next_cache,
+ hidden_states=all_hidden_states,
+ attentions=all_self_attentions,
+ cross_attentions=all_cross_attentions,
+ )
+
+
+class SpeechT5DecoderWithSpeechPrenet(SpeechT5PreTrainedModel):
+ """
+ Wrapper around SpeechT5Decoder that applies SpeechT5SpeechDecoderPrenet to convert log-mel filterbanks to hidden
+ features.
+ """
+
+ def __init__(self, config: SpeechT5Config):
+ super().__init__(config)
+ self.prenet = SpeechT5SpeechDecoderPrenet(config)
+ self.wrapped_decoder = SpeechT5Decoder(config)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def forward(
+ self,
+ input_values: Optional[torch.FloatTensor] = None,
+ attention_mask: Optional[torch.LongTensor] = None,
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
+ encoder_attention_mask: Optional[torch.LongTensor] = None,
+ speaker_embeddings: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, BaseModelOutputWithPastAndCrossAttentions]:
+ decoder_hidden_states = self.prenet(input_values, speaker_embeddings)
+
+ outputs = self.wrapped_decoder(
+ hidden_states=decoder_hidden_states,
+ attention_mask=attention_mask,
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=encoder_attention_mask,
+ head_mask=head_mask,
+ cross_attn_head_mask=cross_attn_head_mask,
+ past_key_values=past_key_values,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ return outputs
+
+
+class SpeechT5DecoderWithTextPrenet(SpeechT5PreTrainedModel):
+ """
+ Wrapper around SpeechT5Decoder that applies SpeechT5TextDecoderPrenet to convert input tokens to hidden features.
+ """
+
+ def __init__(self, config: SpeechT5Config):
+ super().__init__(config)
+ self.prenet = SpeechT5TextDecoderPrenet(config)
+ self.wrapped_decoder = SpeechT5Decoder(config)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.prenet.get_input_embeddings()
+
+ def set_input_embeddings(self, value):
+ self.prenet.set_input_embeddings(value)
+
+ def forward(
+ self,
+ input_values: Optional[torch.FloatTensor] = None,
+ attention_mask: Optional[torch.LongTensor] = None,
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
+ encoder_attention_mask: Optional[torch.LongTensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, BaseModelOutputWithPastAndCrossAttentions]:
+ decoder_hidden_states, attention_mask = self.prenet(input_values, attention_mask, past_key_values)
+
+ outputs = self.wrapped_decoder(
+ hidden_states=decoder_hidden_states,
+ attention_mask=attention_mask,
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=encoder_attention_mask,
+ head_mask=head_mask,
+ cross_attn_head_mask=cross_attn_head_mask,
+ past_key_values=past_key_values,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ return outputs
+
+
+class SpeechT5DecoderWithoutPrenet(SpeechT5PreTrainedModel):
+ """
+ This wrapper class is a helper class to correctly load pretrained checkpoints when used in combination with
+ [`SpeechT5Model`].
+ """
+
+ def __init__(self, config: SpeechT5Config):
+ super().__init__(config)
+ self.wrapped_decoder = SpeechT5Decoder(config)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def forward(
+ self,
+ input_values: Optional[torch.FloatTensor] = None,
+ attention_mask: Optional[torch.LongTensor] = None,
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
+ encoder_attention_mask: Optional[torch.LongTensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, BaseModelOutputWithPastAndCrossAttentions]:
+ outputs = self.wrapped_decoder(
+ hidden_states=input_values,
+ attention_mask=attention_mask,
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=encoder_attention_mask,
+ head_mask=head_mask,
+ cross_attn_head_mask=cross_attn_head_mask,
+ past_key_values=past_key_values,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ return outputs
+
+
+class SpeechT5GuidedMultiheadAttentionLoss(nn.Module):
+ """
+ Guided attention loss from the paper [Efficiently Trainable Text-to-Speech System Based on Deep Convolutional
+ Networks with Guided Attention](https://arxiv.org/abs/1710.08969), adapted for multi-head attention.
+ """
+
+ def __init__(self, config: SpeechT5Config):
+ super().__init__()
+ self.sigma = config.guided_attention_loss_sigma
+ self.scale = config.guided_attention_loss_scale
+
+ def forward(
+ self, attentions: torch.FloatTensor, input_masks: torch.BoolTensor, output_masks: torch.BoolTensor
+ ) -> torch.Tensor:
+ """
+ Compute the attention loss.
+
+ Args:
+ attentions (`torch.FloatTensor` of shape `(batch_size, layers * heads, output_sequence_length, input_sequence_length)`):
+ Batch of multi-head attention weights
+ input_masks (`torch.BoolTensor` of shape `(batch_size, input_sequence_length)`):
+ Input attention mask as booleans.
+ output_masks (`torch.BoolTensor` of shape `(batch_size, output_sequence_length)`):
+ Target attention mask as booleans.
+
+ Returns:
+ `torch.Tensor` with the loss value
+ """
+ guided_attn_masks = self._make_guided_attention_masks(input_masks, output_masks, attentions.device)
+ masks = output_masks.unsqueeze(-1) & input_masks.unsqueeze(-2)
+ masks = masks.to(attentions.device).unsqueeze(1)
+
+ losses = guided_attn_masks * attentions
+ loss = torch.mean(losses.masked_select(masks))
+ return self.scale * loss
+
+ def _make_guided_attention_masks(self, input_masks, output_masks, device):
+ input_lengths = input_masks.sum(-1)
+ output_lengths = output_masks.sum(-1)
+
+ guided_attn_masks = torch.zeros((len(input_masks), output_masks.shape[1], input_masks.shape[1]), device=device)
+
+ for idx, (ilen, olen) in enumerate(zip(input_lengths, output_lengths)):
+ guided_attn_masks[idx, :olen, :ilen] = self._make_guided_attention_mask(ilen, olen, self.sigma, device)
+
+ return guided_attn_masks.unsqueeze(1)
+
+ @staticmethod
+ def _make_guided_attention_mask(input_length, output_length, sigma, device):
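+ # soft diagonal prior: the penalty grows with the squared distance between the normalized output and input positions; sigma sets the width of the low-penalty band along the diagonal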
+ grid_y, grid_x = torch.meshgrid(
+ torch.arange(input_length, device=device),
+ torch.arange(output_length, device=device),
+ indexing="xy",
+ )
+ grid_x = grid_x.float() / output_length
+ grid_y = grid_y.float() / input_length
+ return 1.0 - torch.exp(-((grid_y - grid_x) ** 2) / (2 * (sigma**2)))
+
+
+class SpeechT5SpectrogramLoss(nn.Module):
+ """
+ Loss computation used by SpeechT5ForTextToSpeech.
+ """
+
+ def __init__(self, config: SpeechT5Config):
+ super().__init__()
+ self.use_guided_attention_loss = config.use_guided_attention_loss
+ self.guided_attention_loss_num_heads = config.guided_attention_loss_num_heads
+ self.reduction_factor = config.reduction_factor
+
+ self.l1_criterion = L1Loss()
+ self.bce_criterion = BCEWithLogitsLoss(pos_weight=torch.tensor(5.0))
+
+ if self.use_guided_attention_loss:
+ self.attn_criterion = SpeechT5GuidedMultiheadAttentionLoss(config)
+
+ def forward(
+ self,
+ attention_mask: torch.LongTensor,
+ outputs_before_postnet: torch.FloatTensor,
+ outputs_after_postnet: torch.FloatTensor,
+ logits: torch.FloatTensor,
+ labels: torch.FloatTensor,
+ cross_attentions: Optional[torch.FloatTensor] = None,
+ ) -> torch.Tensor:
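+ # spectrogram frames labelled -100.0 are padding and are excluded from every loss term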
+ padding_mask = labels != -100.0
+
+ # mask out the padded portions
+ labels = labels.masked_select(padding_mask)
+ outputs_before_postnet = outputs_before_postnet.masked_select(padding_mask)
+ outputs_after_postnet = outputs_after_postnet.masked_select(padding_mask)
+
+ # spectrogram loss
+ l1_loss = self.l1_criterion(outputs_after_postnet, labels) + self.l1_criterion(outputs_before_postnet, labels)
+
+ # construct stop labels from the padding mask
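+ # the stop target is 1.0 at the last valid frame of each utterance (the frame just before padding starts) and 0.0 elsewhere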
+ masks = padding_mask[:, :, 0]
+ stop_labels = torch.cat([~masks * 1.0, torch.ones(masks.size(0), 1).to(masks.device)], dim=1)
+ stop_labels = stop_labels[:, 1:].masked_select(masks)
+ logits = logits.masked_select(masks)
+
+ # stop token loss
+ bce_loss = self.bce_criterion(logits, stop_labels)
+
+ # combined loss
+ loss = l1_loss + bce_loss
+
+ # guided attention loss
+ if self.use_guided_attention_loss:
+ attn = torch.cat([x[:, : self.guided_attention_loss_num_heads] for x in cross_attentions], dim=1)
+ input_masks = attention_mask == 1
+ output_masks = padding_mask[:, :, 0]
+ if self.reduction_factor > 1:
+ output_masks = output_masks[:, self.reduction_factor - 1 :: self.reduction_factor]
+ attn_loss = self.attn_criterion(attn, input_masks, output_masks)
+ loss += attn_loss
+
+ return loss
+
+
+SPEECHT5_BASE_START_DOCSTRING = r"""
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
+ etc.)
+
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
+ and behavior.
+
+ Parameters:
+ config ([`SpeechT5Config`]):
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
+ load the weights associated with the model, only the configuration. Check out the
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+ encoder ([`SpeechT5EncoderWithSpeechPrenet`] or [`SpeechT5EncoderWithTextPrenet`] or `None`):
+ The Transformer encoder module that applies the appropriate speech or text encoder prenet. If `None`,
+ [`SpeechT5EncoderWithoutPrenet`] will be used and the `input_values` are assumed to be hidden states.
+ decoder ([`SpeechT5DecoderWithSpeechPrenet`] or [`SpeechT5DecoderWithTextPrenet`] or `None`):
+ The Transformer decoder module that applies the appropriate speech or text decoder prenet. If `None`,
+ [`SpeechT5DecoderWithoutPrenet`] will be used and the `decoder_input_values` are assumed to be hidden
+ states.
+"""
+
+
+SPEECHT5_START_DOCSTRING = r"""
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
+ etc.)
+
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
+ and behavior.
+
+ Parameters:
+ config ([`SpeechT5Config`]):
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
+ load the weights associated with the model, only the configuration. Check out the
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+
+SPEECHT5_INPUTS_DOCSTRING = r"""
+ Args:
+ attention_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing convolution and attention on padding token indices. Mask values selected in `[0,
+ 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+
+ `attention_mask` should only be passed if the corresponding processor has `config.return_attention_mask ==
+ True`. For all models whose processor has `config.return_attention_mask == False`, `attention_mask` should
+ **not** be passed to avoid degraded performance when doing batched inference. For such models
+ `input_values` should simply be padded with 0 and passed without `attention_mask`. Be aware that these
+ models also yield slightly different results depending on whether `input_values` is padded or not.
+
+ decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
+ Default behavior: generate a tensor that ignores pad tokens in `decoder_input_values`. Causal mask will
+ also be used by default.
+
+ If you want to change padding behavior, you should read [`SpeechT5Decoder._prepare_decoder_attention_mask`]
+ and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
+ information on the default strategy.
+
+ head_mask (`torch.FloatTensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ decoder_head_mask (`torch.FloatTensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ encoder_outputs (`tuple(tuple(torch.FloatTensor))`, *optional*):
+ Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`).
+ `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)` is a sequence of
+ hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.
+
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape
+ `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
+
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
+ blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
+
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_values` (those
+ that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
+ `decoder_input_values` of shape `(batch_size, sequence_length)`.
+
+ decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `decoder_input_values` you can choose to directly pass an embedded
+ representation. If `past_key_values` is used, optionally only the last `decoder_inputs_embeds` have to be
+ input (see `past_key_values`). This is useful if you want more control over how to convert
+ `decoder_input_values` indices into associated vectors than the model's internal embedding lookup matrix.
+
+ use_cache (`bool`, *optional*):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
+ `past_key_values`).
+
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+@add_start_docstrings(
+ "The bare SpeechT5 Encoder-Decoder Model outputting raw hidden-states without any specific pre- or post-nets.",
+ SPEECHT5_BASE_START_DOCSTRING,
+)
+class SpeechT5Model(SpeechT5PreTrainedModel):
+ def __init__(
+ self,
+ config: SpeechT5Config,
+ encoder: Optional[nn.Module] = None,
+ decoder: Optional[nn.Module] = None,
+ ):
+ super().__init__(config)
+ self.config = config
+ self.encoder = SpeechT5EncoderWithoutPrenet(config) if encoder is None else encoder
+ self.decoder = SpeechT5DecoderWithoutPrenet(config) if decoder is None else decoder
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ if isinstance(self.encoder, SpeechT5EncoderWithTextPrenet):
+ return self.encoder.get_input_embeddings()
+ if isinstance(self.decoder, SpeechT5DecoderWithTextPrenet):
+ return self.decoder.get_input_embeddings()
+ return None
+
+ def set_input_embeddings(self, value):
+ if isinstance(self.encoder, SpeechT5EncoderWithTextPrenet):
+ self.encoder.set_input_embeddings(value)
+ if isinstance(self.decoder, SpeechT5DecoderWithTextPrenet):
+ self.decoder.set_input_embeddings(value)
+
+ def get_encoder(self):
+ return self.encoder
+
+ def get_decoder(self):
+ return self.decoder
+
+ def freeze_feature_encoder(self):
+ """
+ Calling this function will disable the gradient computation for the feature encoder so that its parameters will
+ not be updated during training.
+ """
+ if isinstance(self.encoder, SpeechT5EncoderWithSpeechPrenet):
+ self.encoder.prenet.freeze_feature_encoder()
+
+ @add_start_docstrings_to_model_forward(SPEECHT5_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=Seq2SeqModelOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_values: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.LongTensor] = None,
+ decoder_input_values: Optional[torch.Tensor] = None,
+ decoder_attention_mask: Optional[torch.LongTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ decoder_head_mask: Optional[torch.FloatTensor] = None,
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
+ encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ use_cache: Optional[bool] = None,
+ speaker_embeddings: Optional[torch.FloatTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple[torch.FloatTensor], Seq2SeqModelOutput]:
+ r"""
+ input_values (`torch.Tensor` of shape `(batch_size, sequence_length)`):
+ Depending on which encoder is being used, the `input_values` are either: float values of the input raw
+ speech waveform, or indices of input sequence tokens in the vocabulary, or hidden states.
+
+ decoder_input_values (`torch.Tensor` of shape `(batch_size, target_sequence_length)`, *optional*):
+ Depending on which decoder is being used, the `decoder_input_values` are either: float values of log-mel
+ filterbank features extracted from the raw speech waveform, or indices of decoder input sequence tokens in
+ the vocabulary, or hidden states.
+
+ speaker_embeddings (`torch.FloatTensor` of shape `(batch_size, config.speaker_embedding_dim)`, *optional*):
+ Tensor containing the speaker embeddings.
+
+ Returns:
+ """
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ # Encode if needed (training, first prediction pass)
+ if encoder_outputs is None:
+ encoder_outputs = self.encoder(
+ input_values=input_values,
+ attention_mask=attention_mask,
+ head_mask=head_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True
+ elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
+ encoder_outputs = BaseModelOutput(
+ last_hidden_state=encoder_outputs[0],
+ hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
+ attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
+ )
+
+ # downsample encoder attention mask (only for encoders with speech input)
+ if attention_mask is not None and isinstance(self.encoder, SpeechT5EncoderWithSpeechPrenet):
+ encoder_attention_mask = self.encoder.prenet._get_feature_vector_attention_mask(
+ encoder_outputs[0].shape[1], attention_mask
+ )
+ else:
+ encoder_attention_mask = attention_mask
+
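+ # only the speech decoder prenet consumes speaker embeddings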
+ if isinstance(self.decoder, SpeechT5DecoderWithSpeechPrenet):
+ decoder_args = {"speaker_embeddings": speaker_embeddings}
+ else:
+ decoder_args = {}
+
+ decoder_outputs = self.decoder(
+ input_values=decoder_input_values,
+ attention_mask=decoder_attention_mask,
+ encoder_hidden_states=encoder_outputs[0],
+ encoder_attention_mask=encoder_attention_mask,
+ head_mask=decoder_head_mask,
+ cross_attn_head_mask=cross_attn_head_mask,
+ past_key_values=past_key_values,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ **decoder_args,
+ )
+
+ if not return_dict:
+ return decoder_outputs + encoder_outputs
+
+ return Seq2SeqModelOutput(
+ last_hidden_state=decoder_outputs.last_hidden_state,
+ past_key_values=decoder_outputs.past_key_values,
+ decoder_hidden_states=decoder_outputs.hidden_states,
+ decoder_attentions=decoder_outputs.attentions,
+ cross_attentions=decoder_outputs.cross_attentions,
+ encoder_last_hidden_state=encoder_outputs.last_hidden_state,
+ encoder_hidden_states=encoder_outputs.hidden_states,
+ encoder_attentions=encoder_outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ """SpeechT5 Model with a speech encoder and a text decoder.""",
+ SPEECHT5_START_DOCSTRING,
+)
+class SpeechT5ForSpeechToText(SpeechT5PreTrainedModel):
+ _tied_weights_keys = ["text_decoder_postnet.lm_head.weight"]
+
+ def __init__(self, config: SpeechT5Config):
+ super().__init__(config)
+
+ if config.vocab_size is None:
+ raise ValueError(
+ f"You are trying to instantiate {self.__class__} with a configuration that does not define the"
+ " vocabulary size of the language model head. Please instantiate the model as follows:"
+ " `SpeechT5ForSpeechToText.from_pretrained(..., vocab_size=vocab_size)`, or define `vocab_size` in"
+ " your model's configuration."
+ )
+
+ speech_encoder = SpeechT5EncoderWithSpeechPrenet(config)
+ text_decoder = SpeechT5DecoderWithTextPrenet(config)
+ self.speecht5 = SpeechT5Model(config, speech_encoder, text_decoder)
+
+ self.text_decoder_postnet = SpeechT5TextDecoderPostnet(config)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_encoder(self):
+ return self.speecht5.get_encoder()
+
+ def get_decoder(self):
+ return self.speecht5.get_decoder()
+
+ def freeze_feature_encoder(self):
+ """
+ Calling this function will disable the gradient computation for the feature encoder so that its parameters will
+ not be updated during training.
+ """
+ self.get_encoder().prenet.freeze_feature_encoder()
+
+ def get_output_embeddings(self):
+ return self.text_decoder_postnet.get_output_embeddings()
+
+ def set_output_embeddings(self, new_embeddings):
+ self.text_decoder_postnet.set_output_embeddings(new_embeddings)
+
+ @add_start_docstrings_to_model_forward(SPEECHT5_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_values: Optional[torch.FloatTensor] = None,
+ attention_mask: Optional[torch.LongTensor] = None,
+ decoder_input_ids: Optional[torch.LongTensor] = None,
+ decoder_attention_mask: Optional[torch.LongTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ decoder_head_mask: Optional[torch.FloatTensor] = None,
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
+ encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ labels: Optional[torch.LongTensor] = None,
+ ) -> Union[Tuple, Seq2SeqLMOutput]:
+ r"""
+ input_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
+ Float values of input raw speech waveform. Values can be obtained by loading a *.flac* or *.wav* audio file
+ into an array of type `List[float]` or a `numpy.ndarray`, *e.g.* via the soundfile library (*pip install
+ soundfile*). To prepare the array into `input_values`, the [`SpeechT5Processor`] should be used for padding
+ and conversion into a tensor of type `torch.FloatTensor`. See [`SpeechT5Processor.__call__`] for details.
+
+ decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
+ Indices of decoder input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`SpeechT5Tokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are decoder input IDs?](../glossary#decoder-input-ids)
+
+ SpeechT5 uses the `eos_token_id` as the starting token for `decoder_input_ids` generation. If
+ `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
+ `past_key_values`).
+
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the language modeling loss. Indices should either be in `[0, ..., config.vocab_size]`
+ or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is
+ only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
+
+ Label indices can be obtained using [`SpeechT5Tokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ Returns:
+
+ Example:
+
+ ```python
+ >>> from transformers import SpeechT5Processor, SpeechT5ForSpeechToText
+ >>> from datasets import load_dataset
+
+ >>> dataset = load_dataset(
+ ... "hf-internal-testing/librispeech_asr_demo", "clean", split="validation"
+ ... ) # doctest: +IGNORE_RESULT
+ >>> dataset = dataset.sort("id")
+ >>> sampling_rate = dataset.features["audio"].sampling_rate
+
+ >>> processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_asr")
+ >>> model = SpeechT5ForSpeechToText.from_pretrained("microsoft/speecht5_asr")
+
+ >>> # audio file is decoded on the fly
+ >>> inputs = processor(audio=dataset[0]["audio"]["array"], sampling_rate=sampling_rate, return_tensors="pt")
+ >>> predicted_ids = model.generate(**inputs, max_length=100)
+
+ >>> # transcribe speech
+ >>> transcription = processor.batch_decode(predicted_ids, skip_special_tokens=True)
+ >>> transcription[0]
+ 'mister quilter is the apostle of the middle classes and we are glad to welcome his gospel'
+ ```
+
+ ```python
+ >>> inputs["labels"] = processor(text_target=dataset[0]["text"], return_tensors="pt").input_ids
+
+ >>> # compute loss
+ >>> loss = model(**inputs).loss
+ >>> round(loss.item(), 2)
+ 19.68
+ ```
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if labels is not None:
+ if decoder_input_ids is None:
+ decoder_input_ids = shift_tokens_right(
+ labels, self.config.pad_token_id, self.config.decoder_start_token_id
+ )
+
+ outputs = self.speecht5(
+ input_values=input_values,
+ attention_mask=attention_mask,
+ decoder_input_values=decoder_input_ids,
+ decoder_attention_mask=decoder_attention_mask,
+ head_mask=head_mask,
+ decoder_head_mask=decoder_head_mask,
+ cross_attn_head_mask=cross_attn_head_mask,
+ encoder_outputs=encoder_outputs,
+ past_key_values=past_key_values,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=True,
+ )
+
+ logits = self.text_decoder_postnet(outputs[0])
+
+ loss = None
+ if labels is not None:
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(logits.view(-1, self.config.vocab_size), labels.view(-1))
+
+ if not return_dict:
+ output = (logits,) + outputs[1:]
+ return ((loss,) + output) if loss is not None else output
+
+ return Seq2SeqLMOutput(
+ loss=loss,
+ logits=logits,
+ past_key_values=outputs.past_key_values,
+ decoder_hidden_states=outputs.decoder_hidden_states,
+ decoder_attentions=outputs.decoder_attentions,
+ cross_attentions=outputs.cross_attentions,
+ encoder_last_hidden_state=outputs.encoder_last_hidden_state,
+ encoder_hidden_states=outputs.encoder_hidden_states,
+ encoder_attentions=outputs.encoder_attentions,
+ )
+
+ def prepare_inputs_for_generation(
+ self,
+ decoder_input_ids,
+ past_key_values=None,
+ attention_mask=None,
+ head_mask=None,
+ decoder_head_mask=None,
+ cross_attn_head_mask=None,
+ use_cache=None,
+ encoder_outputs=None,
+ **kwargs,
+ ):
+ # cut decoder_input_ids if past is used
+ if past_key_values is not None:
+ past_length = past_key_values[0][0].shape[2]
+
+ # Some generation methods already pass only the last input ID
+ if decoder_input_ids.shape[1] > past_length:
+ remove_prefix_length = past_length
+ else:
+ # Default to old behavior: keep only final ID
+ remove_prefix_length = decoder_input_ids.shape[1] - 1
+
+ decoder_input_ids = decoder_input_ids[:, remove_prefix_length:]
+
+ return {
+ "encoder_outputs": encoder_outputs,
+ "past_key_values": past_key_values,
+ "decoder_input_ids": decoder_input_ids,
+ "attention_mask": attention_mask,
+ "head_mask": head_mask,
+ "decoder_head_mask": decoder_head_mask,
+ "cross_attn_head_mask": cross_attn_head_mask,
+ "use_cache": use_cache, # change this to avoid caching (presumably for debugging)
+ }
+
+ @staticmethod
+ def _reorder_cache(past_key_values, beam_idx):
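+ # reorder the cached key/value states so they follow the beam indices selected at this decoding step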
+ reordered_past = ()
+ for layer_past in past_key_values:
+ reordered_past += (
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
+ )
+ return reordered_past
+
+
+def _generate_speech(
+ model: SpeechT5PreTrainedModel,
+ input_values: torch.FloatTensor,
+ speaker_embeddings: Optional[torch.FloatTensor] = None,
+ attention_mask: Optional[torch.LongTensor] = None,
+ threshold: float = 0.5,
+ minlenratio: float = 0.0,
+ maxlenratio: float = 20.0,
+ vocoder: Optional[nn.Module] = None,
+ output_cross_attentions: bool = False,
+ return_output_lengths: bool = False,
+) -> Union[torch.FloatTensor, Tuple[torch.FloatTensor, torch.FloatTensor]]:
+ if speaker_embeddings is None:
+ raise ValueError(
+ """`speaker_embeddings` must be specified. For example, you can use speaker embeddings by following
+ the code snippet provided in this link:
+ https://huggingface.co/datasets/Matthijs/cmu-arctic-xvectors
+ """
+ )
+
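+ # if no attention mask is provided, attend to every position that is not a padding token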
+ if attention_mask is None:
+ encoder_attention_mask = 1 - (input_values == model.config.pad_token_id).int()
+ else:
+ encoder_attention_mask = attention_mask
+
+ bsz = input_values.size(0)
+
+ encoder_out = model.speecht5.encoder(
+ input_values=input_values,
+ attention_mask=encoder_attention_mask,
+ return_dict=True,
+ )
+
+ encoder_last_hidden_state = encoder_out.last_hidden_state
+
+ # downsample encoder attention mask
+ if isinstance(model.speecht5.encoder, SpeechT5EncoderWithSpeechPrenet):
+ encoder_attention_mask = model.speecht5.encoder.prenet._get_feature_vector_attention_mask(
+ encoder_out[0].shape[1], encoder_attention_mask
+ )
+
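+ # the minimum/maximum number of decoder steps scales with the encoder length; each step emits reduction_factor spectrogram frames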
+ maxlen = int(encoder_last_hidden_state.size(1) * maxlenratio / model.config.reduction_factor)
+ minlen = int(encoder_last_hidden_state.size(1) * minlenratio / model.config.reduction_factor)
+
+ # Start the output sequence with a mel spectrum that is all zeros.
+ output_sequence = encoder_last_hidden_state.new_zeros(bsz, 1, model.config.num_mel_bins)
+
+ spectrogram = []
+ cross_attentions = []
+ past_key_values = None
+ idx = 0
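+ # result_spectrogram maps each batch index to its finished spectrogram; the loop stops once all bsz entries are filled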
+ result_spectrogram = {}
+
+ while True:
+ idx += 1
+
+ # Run the decoder prenet on the entire output sequence.
+ decoder_hidden_states = model.speecht5.decoder.prenet(output_sequence, speaker_embeddings)
+ # Run the decoder layers on the last element of the prenet output.
+ decoder_out = model.speecht5.decoder.wrapped_decoder(
+ hidden_states=decoder_hidden_states[:, -1:],
+ attention_mask=None,
+ encoder_hidden_states=encoder_last_hidden_state,
+ encoder_attention_mask=encoder_attention_mask,
+ past_key_values=past_key_values,
+ use_cache=True,
+ output_attentions=output_cross_attentions,
+ return_dict=True,
+ )
+
+ if output_cross_attentions:
+ cross_attentions.append(torch.cat(decoder_out.cross_attentions, dim=0))
+
+ last_decoder_output = decoder_out.last_hidden_state.squeeze(1)
+ past_key_values = decoder_out.past_key_values
+
+ # Predict the new mel spectrum for this step in the sequence.
+ spectrum = model.speech_decoder_postnet.feat_out(last_decoder_output)
+ spectrum = spectrum.view(bsz, model.config.reduction_factor, model.config.num_mel_bins)
+ spectrogram.append(spectrum)
+
+ # Extend the output sequence with the new mel spectrum.
+ new_spectrogram = spectrum[:, -1, :].view(bsz, 1, model.config.num_mel_bins)
+ output_sequence = torch.cat((output_sequence, new_spectrogram), dim=1)
+ # Predict the probability that this is the stop token.
+ prob = torch.sigmoid(model.speech_decoder_postnet.prob_out(last_decoder_output))
+
+ if idx < minlen:
+ continue
+ else:
+ # Before the maximum length is reached, only the batch items whose stop probability exceeds the
+ # threshold are finished. Once the maximum length is reached, every remaining item is treated as finished.
+ if idx < maxlen:
+ meet_thresholds = torch.sum(prob, dim=-1) >= threshold
+ meet_indexes = torch.where(meet_thresholds)[0].tolist()
+ else:
+ meet_indexes = range(len(prob))
+ meet_indexes = [i for i in meet_indexes if i not in result_spectrogram]
+ if len(meet_indexes) > 0:
+ spectrograms = torch.stack(spectrogram)
+ spectrograms = spectrograms.transpose(0, 1).flatten(1, 2)
+ spectrograms = model.speech_decoder_postnet.postnet(spectrograms)
+ for meet_index in meet_indexes:
+ result_spectrogram[meet_index] = spectrograms[meet_index]
+ if len(result_spectrogram) >= bsz:
+ break
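+ # collect the finished spectrograms in their original batch order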
+ spectrograms = [result_spectrogram[i] for i in range(len(result_spectrogram))]
+ if not return_output_lengths:
+ spectrogram = spectrograms[0] if bsz == 1 else torch.nn.utils.rnn.pad_sequence(spectrograms, batch_first=True)
+ if vocoder is not None:
+ outputs = vocoder(spectrogram)
+ else:
+ outputs = spectrogram
+ if output_cross_attentions:
+ cross_attentions = torch.cat(cross_attentions, dim=2)
+ if bsz > 1:
+ cross_attentions = cross_attentions.view(
+ bsz, int(cross_attentions.size(0) / bsz), *cross_attentions.size()[-3:]
+ )
+ outputs = (outputs, cross_attentions)
+ else:
+ # batched return values should also include the spectrogram/waveform lengths
+ spectrogram_lengths = []
+ for i in range(bsz):
+ spectrogram_lengths.append(spectrograms[i].size(0))
+ if vocoder is None:
+ spectrograms = torch.nn.utils.rnn.pad_sequence(spectrograms, batch_first=True)
+ outputs = (spectrograms, spectrogram_lengths)
+ else:
+ waveforms = []
+ spectrograms = torch.nn.utils.rnn.pad_sequence(spectrograms, batch_first=True)
+ waveforms = vocoder(spectrograms)
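+ # derive each waveform length from its spectrogram length via the vocoder's samples-per-frame ratio (total samples / padded spectrogram length)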
+ waveform_lengths = [int(waveforms.size(1) / max(spectrogram_lengths)) * i for i in spectrogram_lengths]
+ outputs = (waveforms, waveform_lengths)
+ if output_cross_attentions:
+ cross_attentions = torch.cat(cross_attentions, dim=2)
+ cross_attentions = cross_attentions.view(
+ bsz, int(cross_attentions.size(0) / bsz), *cross_attentions.size()[-3:]
+ )
+ outputs = (*outputs, cross_attentions)
+ return outputs
+
+
+@add_start_docstrings(
+ """SpeechT5 Model with a text encoder and a speech decoder.""",
+ SPEECHT5_START_DOCSTRING,
+)
+class SpeechT5ForTextToSpeech(SpeechT5PreTrainedModel):
+ main_input_name = "input_ids"
+
+ def __init__(self, config: SpeechT5Config):
+ super().__init__(config)
+
+ if config.vocab_size is None:
+ raise ValueError(
+ f"You are trying to instantiate {self.__class__} with a configuration that does not define the"
+ " vocabulary size of the language model head. Please instantiate the model as follows:"
+ " `SpeechT5ForTextToSpeech.from_pretrained(..., vocab_size=vocab_size)`, or define `vocab_size` in"
+ " your model's configuration."
+ )
+
+ text_encoder = SpeechT5EncoderWithTextPrenet(config)
+ speech_decoder = SpeechT5DecoderWithSpeechPrenet(config)
+ self.speecht5 = SpeechT5Model(config, text_encoder, speech_decoder)
+
+ self.speech_decoder_postnet = SpeechT5SpeechDecoderPostnet(config)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_encoder(self):
+ return self.speecht5.get_encoder()
+
+ def get_decoder(self):
+ return self.speecht5.get_decoder()
+
+ @add_start_docstrings_to_model_forward(SPEECHT5_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=Seq2SeqSpectrogramOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.LongTensor] = None,
+ decoder_input_values: Optional[torch.FloatTensor] = None,
+ decoder_attention_mask: Optional[torch.LongTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ decoder_head_mask: Optional[torch.FloatTensor] = None,
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
+ encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ speaker_embeddings: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.FloatTensor] = None,
+ stop_labels: Optional[torch.Tensor] = None,
+ ) -> Union[Tuple, Seq2SeqSpectrogramOutput]:
+ r"""
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`SpeechT5Tokenizer`]. See [`~PreTrainedTokenizer.encode`] and
+ [`~PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ decoder_input_values (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.num_mel_bins)`):
+ Float values of input mel spectrogram.
+
+ SpeechT5 uses an all-zero spectrum as the starting token for `decoder_input_values` generation. If
+ `past_key_values` is used, optionally only the last `decoder_input_values` have to be input (see
+ `past_key_values`).
+ speaker_embeddings (`torch.FloatTensor` of shape `(batch_size, config.speaker_embedding_dim)`, *optional*):
+ Tensor containing the speaker embeddings.
+ labels (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.num_mel_bins)`, *optional*):
+ Float values of target mel spectrogram. Timesteps set to `-100.0` are ignored (masked) for the loss
+ computation. Spectrograms can be obtained using [`SpeechT5Processor`]. See [`SpeechT5Processor.__call__`]
+ for details.
+
+ Returns:
+
+ Example:
+
+ ```python
+ >>> from transformers import SpeechT5Processor, SpeechT5ForTextToSpeech, SpeechT5HifiGan, set_seed
+ >>> import torch
+
+ >>> processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
+ >>> model = SpeechT5ForTextToSpeech.from_pretrained("microsoft/speecht5_tts")
+ >>> vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan")
+
+ >>> inputs = processor(text="Hello, my dog is cute", return_tensors="pt")
+ >>> speaker_embeddings = torch.zeros((1, 512)) # or load xvectors from a file
+
+ >>> set_seed(555) # make deterministic
+
+ >>> # generate speech
+ >>> speech = model.generate(inputs["input_ids"], speaker_embeddings=speaker_embeddings, vocoder=vocoder)
+ >>> speech.shape
+ torch.Size([15872])
+ ```
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if labels is not None:
+ if decoder_input_values is None:
+ decoder_input_values = shift_spectrograms_right(labels, self.config.reduction_factor)
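+ # the guided attention loss needs the decoder cross-attentions, so force them to be returned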
+ if self.config.use_guided_attention_loss:
+ output_attentions = True
+
+ outputs = self.speecht5(
+ input_values=input_ids,
+ attention_mask=attention_mask,
+ decoder_input_values=decoder_input_values,
+ decoder_attention_mask=decoder_attention_mask,
+ head_mask=head_mask,
+ decoder_head_mask=decoder_head_mask,
+ cross_attn_head_mask=cross_attn_head_mask,
+ encoder_outputs=encoder_outputs,
+ past_key_values=past_key_values,
+ use_cache=use_cache,
+ speaker_embeddings=speaker_embeddings,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=True,
+ )
+
+ outputs_before_postnet, outputs_after_postnet, logits = self.speech_decoder_postnet(outputs[0])
+
+ loss = None
+ if labels is not None:
+ criterion = SpeechT5SpectrogramLoss(self.config)
+ loss = criterion(
+ attention_mask,
+ outputs_before_postnet,
+ outputs_after_postnet,
+ logits,
+ labels,
+ outputs.cross_attentions,
+ )
+
+ if not return_dict:
+ output = (outputs_after_postnet,) + outputs[1:]
+ return ((loss,) + output) if loss is not None else output
+
+ return Seq2SeqSpectrogramOutput(
+ loss=loss,
+ spectrogram=outputs_after_postnet,
+ past_key_values=outputs.past_key_values,
+ decoder_hidden_states=outputs.decoder_hidden_states,
+ decoder_attentions=outputs.decoder_attentions,
+ cross_attentions=outputs.cross_attentions,
+ encoder_last_hidden_state=outputs.encoder_last_hidden_state,
+ encoder_hidden_states=outputs.encoder_hidden_states,
+ encoder_attentions=outputs.encoder_attentions,
+ )
+
+ @torch.no_grad()
+ def generate(
+ self,
+ input_ids: torch.LongTensor,
+ attention_mask: Optional[torch.LongTensor] = None,
+ speaker_embeddings: Optional[torch.FloatTensor] = None,
+ threshold: float = 0.5,
+ minlenratio: float = 0.0,
+ maxlenratio: float = 20.0,
+ vocoder: Optional[nn.Module] = None,
+ output_cross_attentions: bool = False,
+ return_output_lengths: bool = False,
+ **kwargs,
+ ) -> Union[torch.FloatTensor, Tuple[torch.FloatTensor, torch.FloatTensor]]:
+ r"""
+ Converts a sequence of input tokens into a sequence of mel spectrograms, which are subsequently turned into a
+ speech waveform using a vocoder.
+
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`SpeechT5Tokenizer`]. See [`~PreTrainedTokenizer.encode`] and
+ [`~PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ Attention mask from the tokenizer, required for batched inference so the model knows which padded
+ tokens in `input_ids` to ignore.
+ speaker_embeddings (`torch.FloatTensor` of shape `(batch_size, config.speaker_embedding_dim)`, *optional*):
+ Tensor containing the speaker embeddings.
+ threshold (`float`, *optional*, defaults to 0.5):
+ The generated sequence ends when the predicted stop token probability exceeds this value.
+ minlenratio (`float`, *optional*, defaults to 0.0):
+ Used to calculate the minimum required length for the output sequence.
+ maxlenratio (`float`, *optional*, defaults to 20.0):
+ Used to calculate the maximum allowed length for the output sequence.
+ vocoder (`nn.Module`, *optional*):
+ The vocoder that converts the mel spectrogram into a speech waveform. If `None`, the output is the mel
+ spectrogram.
+ output_cross_attentions (`bool`, *optional*, defaults to `False`):
+ Whether or not to return the attentions tensors of the decoder's cross-attention layers.
+ return_output_lengths (`bool`, *optional*, defaults to `False`):
+ Whether or not to return the concrete spectrogram/waveform lengths.
+
+ Returns:
+ `tuple(torch.FloatTensor)` comprising various elements depending on the inputs:
+ - when `return_output_lengths` is False
+ - **spectrogram** (*optional*, returned when no `vocoder` is provided) `torch.FloatTensor` of shape
+ `(output_sequence_length, config.num_mel_bins)` -- The predicted log-mel spectrogram.
+ - **waveform** (*optional*, returned when a `vocoder` is provided) `torch.FloatTensor` of shape
+ `(num_frames,)` -- The predicted speech waveform.
+ - **cross_attentions** (*optional*, returned when `output_cross_attentions` is `True`)
+ `torch.FloatTensor` of shape `(config.decoder_layers, config.decoder_attention_heads,
+ output_sequence_length, input_sequence_length)` -- The outputs of the decoder's cross-attention layers.
+ - when `return_output_lengths` is True
+ - **spectrograms** (*optional*, returned when no `vocoder` is provided) `torch.FloatTensor` of shape
+ `(batch_size, output_sequence_length, config.num_mel_bins)` -- The predicted log-mel spectrograms that
+ are padded to the maximum length.
+ - **spectrogram_lengths** (*optional*, returned when no `vocoder` is provided) `List[Int]` -- A list of
+ all the concrete lengths for each spectrogram.
+ - **waveforms** (*optional*, returned when a `vocoder` is provided) `torch.FloatTensor` of shape
+ `(batch_size, num_frames)` -- The predicted speech waveforms that are padded to the maximum length.
+ - **waveform_lengths** (*optional*, returned when a `vocoder` is provided) `List[Int]` -- A list of all
+ the concrete lengths for each waveform.
+ - **cross_attentions** (*optional*, returned when `output_cross_attentions` is `True`)
+ `torch.FloatTensor` of shape `(batch_size, config.decoder_layers, config.decoder_attention_heads,
+ output_sequence_length, input_sequence_length)` -- The outputs of the decoder's cross-attention layers.
+ """
+ if speaker_embeddings is not None:
+ batch_size = input_ids.size(0)
+ if speaker_embeddings.size(0) != batch_size:
+ if speaker_embeddings.size(0) == 1:
+ speaker_embeddings = speaker_embeddings.repeat(batch_size, 1)
+ else:
+ raise ValueError(
+ "The first dimension of speaker_embeddings must be either 1 or the same as batch_size."
+ )
+
+ return _generate_speech(
+ self,
+ input_ids,
+ speaker_embeddings,
+ attention_mask,
+ threshold,
+ minlenratio,
+ maxlenratio,
+ vocoder,
+ output_cross_attentions,
+ return_output_lengths,
+ )
+
+ @torch.no_grad()
+ def generate_speech(
+ self,
+ input_ids: torch.LongTensor,
+ speaker_embeddings: Optional[torch.FloatTensor] = None,
+ attention_mask: Optional[torch.LongTensor] = None,
+ threshold: float = 0.5,
+ minlenratio: float = 0.0,
+ maxlenratio: float = 20.0,
+ vocoder: Optional[nn.Module] = None,
+ output_cross_attentions: bool = False,
+ return_output_lengths: bool = False,
+ ) -> Union[torch.FloatTensor, Tuple[torch.FloatTensor, torch.FloatTensor]]:
+ r"""
+ Converts a sequence of input tokens into a sequence of mel spectrograms, which are subsequently turned into a
+ speech waveform using a vocoder.
+
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`SpeechT5Tokenizer`]. See [`~PreTrainedTokenizer.encode`] and
+ [`~PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ speaker_embeddings (`torch.FloatTensor` of shape `(batch_size, config.speaker_embedding_dim)`, *optional*):
+ Tensor containing the speaker embeddings.
+ attention_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing convolution and attention on padding token indices. Mask values selected in
+ `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ threshold (`float`, *optional*, defaults to 0.5):
+ The generated sequence ends when the predicted stop token probability exceeds this value.
+ minlenratio (`float`, *optional*, defaults to 0.0):
+ Used to calculate the minimum required length for the output sequence.
+ maxlenratio (`float`, *optional*, defaults to 20.0):
+ Used to calculate the maximum allowed length for the output sequence.
+ vocoder (`nn.Module`, *optional*, defaults to `None`):
+ The vocoder that converts the mel spectrogram into a speech waveform. If `None`, the output is the mel
+ spectrogram.
+ output_cross_attentions (`bool`, *optional*, defaults to `False`):
+ Whether or not to return the attentions tensors of the decoder's cross-attention layers.
+ return_output_lengths (`bool`, *optional*, defaults to `False`):
+ Whether or not to return the concrete spectrogram/waveform lengths.
+
+ Returns:
+ `tuple(torch.FloatTensor)` comprising various elements depending on the inputs:
+ - when `return_output_lengths` is False
+ - **spectrogram** (*optional*, returned when no `vocoder` is provided) `torch.FloatTensor` of shape
+ `(output_sequence_length, config.num_mel_bins)` -- The predicted log-mel spectrogram.
+ - **waveform** (*optional*, returned when a `vocoder` is provided) `torch.FloatTensor` of shape
+ `(num_frames,)` -- The predicted speech waveform.
+ - **cross_attentions** (*optional*, returned when `output_cross_attentions` is `True`)
+ `torch.FloatTensor` of shape `(config.decoder_layers, config.decoder_attention_heads,
+ output_sequence_length, input_sequence_length)` -- The outputs of the decoder's cross-attention layers.
+ - when `return_output_lengths` is True
+ - **spectrograms** (*optional*, returned when no `vocoder` is provided) `torch.FloatTensor` of shape
+ `(batch_size, output_sequence_length, config.num_mel_bins)` -- The predicted log-mel spectrograms that
+ are padded to the maximum length.
+ - **spectrogram_lengths** (*optional*, returned when no `vocoder` is provided) `List[Int]` -- A list of
+ all the concrete lengths for each spectrogram.
+ - **waveforms** (*optional*, returned when a `vocoder` is provided) `torch.FloatTensor` of shape
+ `(batch_size, num_frames)` -- The predicted speech waveforms that are padded to the maximum length.
+ - **waveform_lengths** (*optional*, returned when a `vocoder` is provided) `List[Int]` -- A list of all
+ the concrete lengths for each waveform.
+ - **cross_attentions** (*optional*, returned when `output_cross_attentions` is `True`)
+ `torch.FloatTensor` of shape `(batch_size, config.decoder_layers, config.decoder_attention_heads,
+ output_sequence_length, input_sequence_length)` -- The outputs of the decoder's cross-attention layers.
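+
+        Example:
+
+        A minimal sketch, assuming the surrounding class is [`SpeechT5ForTextToSpeech`] and that the
+        `microsoft/speecht5_tts` and `microsoft/speecht5_hifigan` checkpoints are available; the all-zero
+        speaker embedding is only a placeholder for real x-vectors:
+
+        ```python
+        >>> import torch
+        >>> from transformers import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor
+
+        >>> processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
+        >>> model = SpeechT5ForTextToSpeech.from_pretrained("microsoft/speecht5_tts")
+        >>> vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan")
+
+        >>> inputs = processor(text="Hello, my dog is cute.", return_tensors="pt")
+        >>> speaker_embeddings = torch.zeros((1, 512))  # or load real x-vectors
+
+        >>> # a 1-D waveform is returned because a vocoder is passed
+        >>> speech = model.generate_speech(inputs["input_ids"], speaker_embeddings, vocoder=vocoder)
+        >>> speech.dim()
+        1
+        ```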
+ """
+ if speaker_embeddings is not None:
+ batch_size = input_ids.size(0)
+ if speaker_embeddings.size(0) != batch_size:
+ if speaker_embeddings.size(0) == 1:
+ speaker_embeddings = speaker_embeddings.repeat(batch_size, 1)
+ else:
+ raise ValueError(
+                        "The first dimension of speaker_embeddings must be either 1 or the same as batch_size."
+ )
+
+ return _generate_speech(
+ self,
+ input_ids,
+ speaker_embeddings,
+ attention_mask,
+ threshold,
+ minlenratio,
+ maxlenratio,
+ vocoder,
+ output_cross_attentions,
+ return_output_lengths,
+ )
+
+
+@add_start_docstrings(
+ """SpeechT5 Model with a speech encoder and a speech decoder.""",
+ SPEECHT5_START_DOCSTRING,
+)
+class SpeechT5ForSpeechToSpeech(SpeechT5PreTrainedModel):
+ def __init__(self, config: SpeechT5Config):
+ super().__init__(config)
+
+ speech_encoder = SpeechT5EncoderWithSpeechPrenet(config)
+ speech_decoder = SpeechT5DecoderWithSpeechPrenet(config)
+ self.speecht5 = SpeechT5Model(config, speech_encoder, speech_decoder)
+
+ self.speech_decoder_postnet = SpeechT5SpeechDecoderPostnet(config)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_encoder(self):
+ return self.speecht5.get_encoder()
+
+ def get_decoder(self):
+ return self.speecht5.get_decoder()
+
+ def freeze_feature_encoder(self):
+ """
+        Calling this function will disable the gradient computation for the feature encoder so that its parameters
+        will not be updated during training.
+ """
+ self.get_encoder().prenet.freeze_feature_encoder()
+
+ @add_start_docstrings_to_model_forward(SPEECHT5_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=Seq2SeqSpectrogramOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_values: Optional[torch.FloatTensor] = None,
+ attention_mask: Optional[torch.LongTensor] = None,
+ decoder_input_values: Optional[torch.FloatTensor] = None,
+ decoder_attention_mask: Optional[torch.LongTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ decoder_head_mask: Optional[torch.FloatTensor] = None,
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
+ encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ speaker_embeddings: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.FloatTensor] = None,
+ stop_labels: Optional[torch.Tensor] = None,
+ ) -> Union[Tuple, Seq2SeqSpectrogramOutput]:
+ r"""
+ input_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
+ Float values of input raw speech waveform. Values can be obtained by loading a *.flac* or *.wav* audio file
+ into an array of type `List[float]` or a `numpy.ndarray`, *e.g.* via the soundfile library (*pip install
+ soundfile*). To prepare the array into `input_values`, the [`SpeechT5Processor`] should be used for padding
+ and conversion into a tensor of type `torch.FloatTensor`. See [`SpeechT5Processor.__call__`] for details.
+ decoder_input_values (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.num_mel_bins)`):
+ Float values of input mel spectrogram.
+
+ SpeechT5 uses an all-zero spectrum as the starting token for `decoder_input_values` generation. If
+ `past_key_values` is used, optionally only the last `decoder_input_values` have to be input (see
+ `past_key_values`).
+ speaker_embeddings (`torch.FloatTensor` of shape `(batch_size, config.speaker_embedding_dim)`, *optional*):
+ Tensor containing the speaker embeddings.
+ labels (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.num_mel_bins)`, *optional*):
+ Float values of target mel spectrogram. Spectrograms can be obtained using [`SpeechT5Processor`]. See
+ [`SpeechT5Processor.__call__`] for details.
+
+ Returns:
+
+ Example:
+
+ ```python
+ >>> from transformers import SpeechT5Processor, SpeechT5ForSpeechToSpeech, SpeechT5HifiGan, set_seed
+ >>> from datasets import load_dataset
+ >>> import torch
+
+ >>> dataset = load_dataset(
+ ... "hf-internal-testing/librispeech_asr_demo", "clean", split="validation"
+ ... ) # doctest: +IGNORE_RESULT
+ >>> dataset = dataset.sort("id")
+ >>> sampling_rate = dataset.features["audio"].sampling_rate
+
+ >>> processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_vc")
+ >>> model = SpeechT5ForSpeechToSpeech.from_pretrained("microsoft/speecht5_vc")
+ >>> vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan")
+
+ >>> # audio file is decoded on the fly
+ >>> inputs = processor(audio=dataset[0]["audio"]["array"], sampling_rate=sampling_rate, return_tensors="pt")
+
+ >>> speaker_embeddings = torch.zeros((1, 512)) # or load xvectors from a file
+
+ >>> set_seed(555) # make deterministic
+
+ >>> # generate speech
+ >>> speech = model.generate_speech(inputs["input_values"], speaker_embeddings, vocoder=vocoder)
+ >>> speech.shape
+ torch.Size([77824])
+ ```
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if labels is not None:
+ if decoder_input_values is None:
+ decoder_input_values = shift_spectrograms_right(labels, self.config.reduction_factor)
+
+ outputs = self.speecht5(
+ input_values=input_values,
+ attention_mask=attention_mask,
+ decoder_input_values=decoder_input_values,
+ decoder_attention_mask=decoder_attention_mask,
+ head_mask=head_mask,
+ decoder_head_mask=decoder_head_mask,
+ cross_attn_head_mask=cross_attn_head_mask,
+ encoder_outputs=encoder_outputs,
+ past_key_values=past_key_values,
+ use_cache=use_cache,
+ speaker_embeddings=speaker_embeddings,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=True,
+ )
+
+ _, spectrogram, logits = self.speech_decoder_postnet(outputs[0])
+
+ loss = None
+
+ if not return_dict:
+ output = (spectrogram,) + outputs[1:]
+ return ((loss,) + output) if loss is not None else output
+
+ return Seq2SeqSpectrogramOutput(
+ loss=loss,
+ spectrogram=spectrogram,
+ past_key_values=outputs.past_key_values,
+ decoder_hidden_states=outputs.decoder_hidden_states,
+ decoder_attentions=outputs.decoder_attentions,
+ cross_attentions=outputs.cross_attentions,
+ encoder_last_hidden_state=outputs.encoder_last_hidden_state,
+ encoder_hidden_states=outputs.encoder_hidden_states,
+ encoder_attentions=outputs.encoder_attentions,
+ )
+
+ @torch.no_grad()
+ def generate_speech(
+ self,
+ input_values: torch.FloatTensor,
+ speaker_embeddings: Optional[torch.FloatTensor] = None,
+ attention_mask: Optional[torch.LongTensor] = None,
+ threshold: float = 0.5,
+ minlenratio: float = 0.0,
+ maxlenratio: float = 20.0,
+ vocoder: Optional[nn.Module] = None,
+ output_cross_attentions: bool = False,
+ return_output_lengths: bool = False,
+ ) -> torch.FloatTensor:
+ r"""
+ Converts a raw speech waveform into a sequence of mel spectrograms, which are subsequently turned back into a
+ speech waveform using a vocoder.
+
+ Args:
+ input_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
+ Float values of input raw speech waveform.
+
+ Values can be obtained by loading a *.flac* or *.wav* audio file into an array of type `List[float]` or
+ a `numpy.ndarray`, *e.g.* via the soundfile library (*pip install soundfile*). To prepare the array
+ into `input_values`, the [`SpeechT5Processor`] should be used for padding and conversion into a tensor
+ of type `torch.FloatTensor`. See [`SpeechT5Processor.__call__`] for details.
+ speaker_embeddings (`torch.FloatTensor` of shape `(batch_size, config.speaker_embedding_dim)`, *optional*):
+ Tensor containing the speaker embeddings.
+ attention_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing convolution and attention on padding token indices. Mask values selected in
+ `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ threshold (`float`, *optional*, defaults to 0.5):
+ The generated sequence ends when the predicted stop token probability exceeds this value.
+ minlenratio (`float`, *optional*, defaults to 0.0):
+ Used to calculate the minimum required length for the output sequence.
+ maxlenratio (`float`, *optional*, defaults to 20.0):
+ Used to calculate the maximum allowed length for the output sequence.
+ vocoder (`nn.Module`, *optional*, defaults to `None`):
+ The vocoder that converts the mel spectrogram into a speech waveform. If `None`, the output is the mel
+ spectrogram.
+ output_cross_attentions (`bool`, *optional*, defaults to `False`):
+ Whether or not to return the attentions tensors of the decoder's cross-attention layers.
+ return_output_lengths (`bool`, *optional*, defaults to `False`):
+ Whether or not to return the concrete spectrogram/waveform lengths.
+
+ Returns:
+ `tuple(torch.FloatTensor)` comprising various elements depending on the inputs:
+ - when `return_output_lengths` is False
+ - **spectrogram** (*optional*, returned when no `vocoder` is provided) `torch.FloatTensor` of shape
+ `(output_sequence_length, config.num_mel_bins)` -- The predicted log-mel spectrogram.
+ - **waveform** (*optional*, returned when a `vocoder` is provided) `torch.FloatTensor` of shape
+ `(num_frames,)` -- The predicted speech waveform.
+ - **cross_attentions** (*optional*, returned when `output_cross_attentions` is `True`)
+ `torch.FloatTensor` of shape `(config.decoder_layers, config.decoder_attention_heads,
+ output_sequence_length, input_sequence_length)` -- The outputs of the decoder's cross-attention layers.
+ - when `return_output_lengths` is True
+ - **spectrograms** (*optional*, returned when no `vocoder` is provided) `torch.FloatTensor` of shape
+ `(batch_size, output_sequence_length, config.num_mel_bins)` -- The predicted log-mel spectrograms that
+ are padded to the maximum length.
+ - **spectrogram_lengths** (*optional*, returned when no `vocoder` is provided) `List[Int]` -- A list of
+ all the concrete lengths for each spectrogram.
+ - **waveforms** (*optional*, returned when a `vocoder` is provided) `torch.FloatTensor` of shape
+ `(batch_size, num_frames)` -- The predicted speech waveforms that are padded to the maximum length.
+ - **waveform_lengths** (*optional*, returned when a `vocoder` is provided) `List[Int]` -- A list of all
+ the concrete lengths for each waveform.
+ - **cross_attentions** (*optional*, returned when `output_cross_attentions` is `True`)
+ `torch.FloatTensor` of shape `(batch_size, config.decoder_layers, config.decoder_attention_heads,
+ output_sequence_length, input_sequence_length)` -- The outputs of the decoder's cross-attention layers.
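+
+        Example:
+
+        A brief sketch building on the example in [`~SpeechT5ForSpeechToSpeech.forward`] above (same `model`,
+        `inputs`, `speaker_embeddings` and `vocoder`); it only illustrates the `return_output_lengths=True`
+        return structure:
+
+        ```python
+        >>> waveforms, waveform_lengths = model.generate_speech(
+        ...     inputs["input_values"], speaker_embeddings, vocoder=vocoder, return_output_lengths=True
+        ... )
+        >>> waveforms.shape[0] == len(waveform_lengths)
+        True
+        ```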
+ """
+ if speaker_embeddings is None:
+ speaker_embeddings = torch.zeros((1, 512), device=input_values.device)
+
+ return _generate_speech(
+ self,
+ input_values,
+ speaker_embeddings,
+ attention_mask,
+ threshold,
+ minlenratio,
+ maxlenratio,
+ vocoder,
+ output_cross_attentions,
+ return_output_lengths,
+ )
+
+
+HIFIGAN_START_DOCSTRING = r"""
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
+ library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
+ etc.)
+
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
+ and behavior.
+
+ Parameters:
+ config ([`SpeechT5HifiGanConfig`]):
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
+ load the weights associated with the model, only the configuration. Check out the
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+
+class HifiGanResidualBlock(nn.Module):
+ def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5), leaky_relu_slope=0.1):
+ super().__init__()
+ self.leaky_relu_slope = leaky_relu_slope
+
+ self.convs1 = nn.ModuleList(
+ [
+ nn.Conv1d(
+ channels,
+ channels,
+ kernel_size,
+ stride=1,
+ dilation=dilation[i],
+ padding=self.get_padding(kernel_size, dilation[i]),
+ )
+ for i in range(len(dilation))
+ ]
+ )
+ self.convs2 = nn.ModuleList(
+ [
+ nn.Conv1d(
+ channels,
+ channels,
+ kernel_size,
+ stride=1,
+ dilation=1,
+ padding=self.get_padding(kernel_size, 1),
+ )
+ for _ in range(len(dilation))
+ ]
+ )
+
+ def get_padding(self, kernel_size, dilation=1):
+ return (kernel_size * dilation - dilation) // 2
+
+ def apply_weight_norm(self):
+ for layer in self.convs1:
+ nn.utils.weight_norm(layer)
+ for layer in self.convs2:
+ nn.utils.weight_norm(layer)
+
+ def remove_weight_norm(self):
+ for layer in self.convs1:
+ nn.utils.remove_weight_norm(layer)
+ for layer in self.convs2:
+ nn.utils.remove_weight_norm(layer)
+
+ def forward(self, hidden_states):
+ for conv1, conv2 in zip(self.convs1, self.convs2):
+ residual = hidden_states
+ hidden_states = nn.functional.leaky_relu(hidden_states, self.leaky_relu_slope)
+ hidden_states = conv1(hidden_states)
+ hidden_states = nn.functional.leaky_relu(hidden_states, self.leaky_relu_slope)
+ hidden_states = conv2(hidden_states)
+ hidden_states = hidden_states + residual
+ return hidden_states
+
+
+@add_start_docstrings(
+ """HiFi-GAN vocoder.""",
+ HIFIGAN_START_DOCSTRING,
+)
+class SpeechT5HifiGan(PreTrainedModel):
+ config_class = SpeechT5HifiGanConfig
+ main_input_name = "spectrogram"
+
+ def __init__(self, config: SpeechT5HifiGanConfig):
+ super().__init__(config)
+ self.num_kernels = len(config.resblock_kernel_sizes)
+ self.num_upsamples = len(config.upsample_rates)
+ self.conv_pre = nn.Conv1d(
+ config.model_in_dim,
+ config.upsample_initial_channel,
+ kernel_size=7,
+ stride=1,
+ padding=3,
+ )
+
+ self.upsampler = nn.ModuleList()
+ for i, (upsample_rate, kernel_size) in enumerate(zip(config.upsample_rates, config.upsample_kernel_sizes)):
+ self.upsampler.append(
+ nn.ConvTranspose1d(
+ config.upsample_initial_channel // (2**i),
+ config.upsample_initial_channel // (2 ** (i + 1)),
+ kernel_size=kernel_size,
+ stride=upsample_rate,
+ padding=(kernel_size - upsample_rate) // 2,
+ )
+ )
+
+ self.resblocks = nn.ModuleList()
+ for i in range(len(self.upsampler)):
+ channels = config.upsample_initial_channel // (2 ** (i + 1))
+ for kernel_size, dilation in zip(config.resblock_kernel_sizes, config.resblock_dilation_sizes):
+ self.resblocks.append(HifiGanResidualBlock(channels, kernel_size, dilation, config.leaky_relu_slope))
+
+ self.conv_post = nn.Conv1d(channels, 1, kernel_size=7, stride=1, padding=3)
+
+ self.register_buffer("mean", torch.zeros(config.model_in_dim))
+ self.register_buffer("scale", torch.ones(config.model_in_dim))
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def _init_weights(self, module):
+ """Initialize the weights."""
+ if isinstance(module, (nn.Linear, nn.Conv1d)):
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+ if module.bias is not None:
+ module.bias.data.zero_()
+
+ def apply_weight_norm(self):
+ nn.utils.weight_norm(self.conv_pre)
+ for layer in self.upsampler:
+ nn.utils.weight_norm(layer)
+ for layer in self.resblocks:
+ layer.apply_weight_norm()
+ nn.utils.weight_norm(self.conv_post)
+
+ def remove_weight_norm(self):
+ nn.utils.remove_weight_norm(self.conv_pre)
+ for layer in self.upsampler:
+ nn.utils.remove_weight_norm(layer)
+ for layer in self.resblocks:
+ layer.remove_weight_norm()
+ nn.utils.remove_weight_norm(self.conv_post)
+
+ def forward(self, spectrogram: torch.FloatTensor) -> torch.FloatTensor:
+ r"""
+ Converts a log-mel spectrogram into a speech waveform. Passing a batch of log-mel spectrograms returns a batch
+ of speech waveforms. Passing a single, un-batched log-mel spectrogram returns a single, un-batched speech
+ waveform.
+
+ Args:
+ spectrogram (`torch.FloatTensor`):
+ Tensor containing the log-mel spectrograms. Can be batched and of shape `(batch_size, sequence_length,
+ config.model_in_dim)`, or un-batched and of shape `(sequence_length, config.model_in_dim)`.
+
+ Returns:
+ `torch.FloatTensor`: Tensor containing the speech waveform. If the input spectrogram is batched, will be of
+ shape `(batch_size, num_frames,)`. If un-batched, will be of shape `(num_frames,)`.
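+
+        Example:
+
+        A shape-only sketch with a randomly initialized vocoder; the random spectrogram is just a stand-in for
+        real acoustic-model output:
+
+        ```python
+        >>> import torch
+        >>> from transformers import SpeechT5HifiGan, SpeechT5HifiGanConfig
+
+        >>> vocoder = SpeechT5HifiGan(SpeechT5HifiGanConfig())
+        >>> spectrogram = torch.randn(140, vocoder.config.model_in_dim)  # un-batched: (sequence_length, model_in_dim)
+        >>> waveform = vocoder(spectrogram)
+        >>> waveform.dim()
+        1
+        ```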
+ """
+ if self.config.normalize_before:
+ spectrogram = (spectrogram - self.mean) / self.scale
+
+ is_batched = spectrogram.dim() == 3
+ if not is_batched:
+ spectrogram = spectrogram.unsqueeze(0)
+
+ hidden_states = spectrogram.transpose(2, 1)
+
+ hidden_states = self.conv_pre(hidden_states)
+ for i in range(self.num_upsamples):
+ hidden_states = nn.functional.leaky_relu(hidden_states, self.config.leaky_relu_slope)
+ hidden_states = self.upsampler[i](hidden_states)
+
+ res_state = self.resblocks[i * self.num_kernels](hidden_states)
+ for j in range(1, self.num_kernels):
+ res_state += self.resblocks[i * self.num_kernels + j](hidden_states)
+ hidden_states = res_state / self.num_kernels
+
+ hidden_states = nn.functional.leaky_relu(hidden_states)
+ hidden_states = self.conv_post(hidden_states)
+ hidden_states = torch.tanh(hidden_states)
+
+ if not is_batched:
+ # remove batch dim and collapse tensor to 1-d audio waveform
+ waveform = hidden_states.squeeze(0).transpose(1, 0).view(-1)
+ else:
+ # remove seq-len dim since this collapses to 1
+ waveform = hidden_states.squeeze(1)
+
+ return waveform
diff --git a/venv/lib/python3.10/site-packages/transformers/models/speecht5/number_normalizer.py b/venv/lib/python3.10/site-packages/transformers/models/speecht5/number_normalizer.py
new file mode 100644
index 0000000000000000000000000000000000000000..eb3314c24f24c1f8b9bc760c4ece69e0a2819888
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/speecht5/number_normalizer.py
@@ -0,0 +1,192 @@
+# coding=utf-8
+# Copyright 2023 The Fairseq Authors, Microsoft Research, and the HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Number Normalizer class for SpeechT5."""
+
+import re
+
+
+class EnglishNumberNormalizer:
+ def __init__(self):
+ self.ones = ["", "one", "two", "three", "four", "five", "six", "seven", "eight", "nine"]
+ self.teens = [
+ "",
+ "eleven",
+ "twelve",
+ "thirteen",
+ "fourteen",
+ "fifteen",
+ "sixteen",
+ "seventeen",
+ "eighteen",
+ "nineteen",
+ ]
+ self.tens = ["", "ten", "twenty", "thirty", "forty", "fifty", "sixty", "seventy", "eighty", "ninety"]
+ self.thousands = [
+ "",
+ "thousand",
+ "million",
+ "billion",
+ "trillion",
+ "quadrillion",
+ "quintillion",
+ "sextillion",
+ "septillion",
+ "octillion",
+ "nonillion",
+ "decillion",
+ ]
+
+ # Define a dictionary to map currency symbols to their names
+ # Top most traded currencies according to
+ # https://en.wikipedia.org/wiki/Template:Most_traded_currencies
+ self.currency_symbols = {
+ "$": " dollars",
+ "€": " euros",
+ "£": " pounds",
+ "¢": " cents",
+ "¥": " japanese yen",
+ "﷼": " saudi riyal",
+ "₹": " indian rupees",
+ "₽": " russian rubles",
+ "฿": " thai baht",
+ "₺": " turkish liras",
+ "₴": " ukrainian hryvnia",
+ "₣": " swiss francs",
+ "₡": " costa rican colon",
+ "₱": " philippine peso",
+ "₪": " israeli shekels",
+ "₮": " mongolian tögrög",
+ "₩": " south korean won",
+ "₦": " nigerian naira",
+ "₫": " vietnamese Đồng",
+ }
+
+ def spell_number(self, num):
+ if num == 0:
+ return "zero"
+
+ parts = []
+ for i in range(0, len(self.thousands)):
+ if num % 1000 != 0:
+ part = ""
+ hundreds = num % 1000 // 100
+ tens_units = num % 100
+
+ if hundreds > 0:
+ part += self.ones[hundreds] + " hundred"
+ if tens_units > 0:
+ part += " and "
+
+ if tens_units > 10 and tens_units < 20:
+ part += self.teens[tens_units - 10]
+ else:
+ tens_digit = self.tens[tens_units // 10]
+ ones_digit = self.ones[tens_units % 10]
+ if tens_digit:
+ part += tens_digit
+ if ones_digit:
+ if tens_digit:
+ part += " "
+ part += ones_digit
+
+ parts.append(part)
+
+ num //= 1000
+
+ return " ".join(reversed(parts))
+
+ def convert(self, number):
+ """
+ Converts an individual number passed in string form to spelt-out form
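+
+        Example (illustrative; the outputs follow directly from the rules implemented below):
+
+        ```python
+        >>> normalizer = EnglishNumberNormalizer()
+        >>> normalizer.convert("15000")
+        'fifteen thousand'
+        >>> normalizer.convert("$4.50")
+        'four point five zero dollars'
+        ```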
+ """
+ if "." in number:
+ integer_part, decimal_part = number.split(".")
+ else:
+ integer_part, decimal_part = number, "00"
+
+ # Extract currency symbol if present
+ currency_symbol = ""
+ for symbol, name in self.currency_symbols.items():
+ if integer_part.startswith(symbol):
+ currency_symbol = name
+ integer_part = integer_part[len(symbol) :]
+ break
+
+ if integer_part.startswith("-"):
+ if integer_part[1:].startswith(symbol):
+ currency_symbol = name
+ integer_part = "-" + integer_part[len(symbol) + 1 :]
+ break
+
+ # Extract 'minus' prefix for negative numbers
+ minus_prefix = ""
+ if integer_part.startswith("-"):
+ minus_prefix = "minus "
+ integer_part = integer_part[1:]
+ elif integer_part.startswith("minus"):
+ minus_prefix = "minus "
+ integer_part = integer_part[len("minus") :]
+
+ percent_suffix = ""
+ if "%" in integer_part or "%" in decimal_part:
+ percent_suffix = " percent"
+ integer_part = integer_part.replace("%", "")
+ decimal_part = decimal_part.replace("%", "")
+
+ integer_part = integer_part.zfill(3 * ((len(integer_part) - 1) // 3 + 1))
+
+ parts = []
+ for i in range(0, len(integer_part), 3):
+ chunk = int(integer_part[i : i + 3])
+ if chunk > 0:
+ part = self.spell_number(chunk)
+ unit = self.thousands[len(integer_part[i:]) // 3 - 1]
+ if unit:
+ part += " " + unit
+ parts.append(part)
+
+ spelled_integer = " ".join(parts)
+
+ # Format the spelt-out number based on conditions, such as:
+ # If it has decimal parts, currency symbol, minus prefix, etc
+ if decimal_part == "00":
+ return (
+ f"{minus_prefix}{spelled_integer}{percent_suffix}{currency_symbol}"
+ if minus_prefix or currency_symbol
+ else f"{spelled_integer}{percent_suffix}"
+ )
+ else:
+ spelled_decimal = " ".join([self.spell_number(int(digit)) for digit in decimal_part])
+ return (
+ f"{minus_prefix}{spelled_integer} point {spelled_decimal}{percent_suffix}{currency_symbol}"
+ if minus_prefix or currency_symbol
+ else f"{minus_prefix}{spelled_integer} point {spelled_decimal}{percent_suffix}"
+ )
+
+ def __call__(self, text):
+ """
+ Convert numbers / number-like quantities in a string to their spelt-out counterparts
+ """
+ # Form part of the pattern for all currency symbols
+        pattern = r"(?<!\w)(-?\$?\€?\£?\¢?\¥?\₹?\₽?\฿?\₺?\₴?\₣?\₡?\₱?\₪?\₮?\₩?\₦?\₫?\﷼?\d+(?:\.\d{1,2})?%?)(?!\w)"
+
+        # Find and replace commas in numbers (15,000 -> 15000, etc)
+ text = re.sub(r"(\d+,\d+)", lambda match: match.group(1).replace(",", ""), text)
+
+ # Use regex to find and replace numbers in the text
+ converted_text = re.sub(pattern, lambda match: self.convert(match.group(1)), text)
+ converted_text = re.sub(" +", " ", converted_text)
+
+ return converted_text
diff --git a/venv/lib/python3.10/site-packages/transformers/models/speecht5/processing_speecht5.py b/venv/lib/python3.10/site-packages/transformers/models/speecht5/processing_speecht5.py
new file mode 100644
index 0000000000000000000000000000000000000000..468a0c1d89ab21c3ae4f4cba7947a8535cc42f14
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/speecht5/processing_speecht5.py
@@ -0,0 +1,183 @@
+# coding=utf-8
+# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Speech processor class for SpeechT5."""
+
+from ...processing_utils import ProcessorMixin
+
+
+class SpeechT5Processor(ProcessorMixin):
+ r"""
+ Constructs a SpeechT5 processor which wraps a feature extractor and a tokenizer into a single processor.
+
+ [`SpeechT5Processor`] offers all the functionalities of [`SpeechT5FeatureExtractor`] and [`SpeechT5Tokenizer`]. See
+ the docstring of [`~SpeechT5Processor.__call__`] and [`~SpeechT5Processor.decode`] for more information.
+
+ Args:
+ feature_extractor (`SpeechT5FeatureExtractor`):
+ An instance of [`SpeechT5FeatureExtractor`]. The feature extractor is a required input.
+ tokenizer (`SpeechT5Tokenizer`):
+ An instance of [`SpeechT5Tokenizer`]. The tokenizer is a required input.
+ """
+
+ feature_extractor_class = "SpeechT5FeatureExtractor"
+ tokenizer_class = "SpeechT5Tokenizer"
+
+ def __init__(self, feature_extractor, tokenizer):
+ super().__init__(feature_extractor, tokenizer)
+
+ def __call__(self, *args, **kwargs):
+ """
+ Processes audio and text input, as well as audio and text targets.
+
+ You can process audio by using the argument `audio`, or process audio targets by using the argument
+ `audio_target`. This forwards the arguments to SpeechT5FeatureExtractor's
+ [`~SpeechT5FeatureExtractor.__call__`].
+
+ You can process text by using the argument `text`, or process text labels by using the argument `text_target`.
+ This forwards the arguments to SpeechT5Tokenizer's [`~SpeechT5Tokenizer.__call__`].
+
+ Valid input combinations are:
+
+ - `text` only
+ - `audio` only
+ - `text_target` only
+ - `audio_target` only
+ - `text` and `audio_target`
+ - `audio` and `audio_target`
+ - `text` and `text_target`
+ - `audio` and `text_target`
+
+ Please refer to the docstring of the above two methods for more information.
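+
+        Example:
+
+        A minimal sketch for the text-to-speech case, assuming the `microsoft/speecht5_tts` checkpoint is
+        available; random noise stands in for a real target waveform:
+
+        ```python
+        >>> import numpy as np
+        >>> from transformers import SpeechT5Processor
+
+        >>> processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
+
+        >>> # text only (inference inputs)
+        >>> inputs = processor(text="Hello world", return_tensors="pt")
+
+        >>> # text + audio target (training example)
+        >>> waveform = np.random.randn(16000).astype(np.float32)
+        >>> batch = processor(text="Hello world", audio_target=waveform, sampling_rate=16000, return_tensors="pt")
+        >>> "input_ids" in batch and "labels" in batch
+        True
+        ```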
+ """
+ audio = kwargs.pop("audio", None)
+ text = kwargs.pop("text", None)
+ text_target = kwargs.pop("text_target", None)
+ audio_target = kwargs.pop("audio_target", None)
+ sampling_rate = kwargs.pop("sampling_rate", None)
+
+ if audio is not None and text is not None:
+ raise ValueError(
+ "Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?"
+ )
+ if audio_target is not None and text_target is not None:
+ raise ValueError(
+ "Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?"
+ )
+ if audio is None and audio_target is None and text is None and text_target is None:
+ raise ValueError(
+ "You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process."
+ )
+
+ if audio is not None:
+ inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
+ elif text is not None:
+ inputs = self.tokenizer(text, **kwargs)
+ else:
+ inputs = None
+
+ if audio_target is not None:
+ targets = self.feature_extractor(audio_target=audio_target, *args, sampling_rate=sampling_rate, **kwargs)
+ labels = targets["input_values"]
+ elif text_target is not None:
+ targets = self.tokenizer(text_target, **kwargs)
+ labels = targets["input_ids"]
+ else:
+ targets = None
+
+ if inputs is None:
+ return targets
+
+ if targets is not None:
+ inputs["labels"] = labels
+
+ decoder_attention_mask = targets.get("attention_mask")
+ if decoder_attention_mask is not None:
+ inputs["decoder_attention_mask"] = decoder_attention_mask
+
+ return inputs
+
+ def pad(self, *args, **kwargs):
+ """
+ Collates the audio and text inputs, as well as their targets, into a padded batch.
+
+ Audio inputs are padded by SpeechT5FeatureExtractor's [`~SpeechT5FeatureExtractor.pad`]. Text inputs are padded
+ by SpeechT5Tokenizer's [`~SpeechT5Tokenizer.pad`].
+
+ Valid input combinations are:
+
+ - `input_ids` only
+ - `input_values` only
+ - `labels` only, either log-mel spectrograms or text tokens
+ - `input_ids` and log-mel spectrogram `labels`
+ - `input_values` and text `labels`
+
+ Please refer to the docstring of the above two methods for more information.
+ """
+ input_values = kwargs.pop("input_values", None)
+ input_ids = kwargs.pop("input_ids", None)
+ labels = kwargs.pop("labels", None)
+
+ if input_values is not None and input_ids is not None:
+ raise ValueError("Cannot process both `input_values` and `input_ids` inputs.")
+ if input_values is None and input_ids is None and labels is None:
+ raise ValueError(
+ "You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded."
+ )
+
+ if input_values is not None:
+ inputs = self.feature_extractor.pad(input_values, *args, **kwargs)
+ elif input_ids is not None:
+ inputs = self.tokenizer.pad(input_ids, **kwargs)
+ else:
+ inputs = None
+
+ if labels is not None:
+ if "input_ids" in labels or (isinstance(labels, list) and "input_ids" in labels[0]):
+ targets = self.tokenizer.pad(labels, **kwargs)
+ labels = targets["input_ids"]
+ else:
+ feature_size_hack = self.feature_extractor.feature_size
+ self.feature_extractor.feature_size = self.feature_extractor.num_mel_bins
+ targets = self.feature_extractor.pad(labels, *args, **kwargs)
+ self.feature_extractor.feature_size = feature_size_hack
+ labels = targets["input_values"]
+ else:
+ targets = None
+
+ if inputs is None:
+ return targets
+
+ if targets is not None:
+ inputs["labels"] = labels
+
+ decoder_attention_mask = targets.get("attention_mask")
+ if decoder_attention_mask is not None:
+ inputs["decoder_attention_mask"] = decoder_attention_mask
+
+ return inputs
+
+ def batch_decode(self, *args, **kwargs):
+ """
+ This method forwards all its arguments to SpeechT5Tokenizer's [`~SpeechT5Tokenizer.batch_decode`]. Please refer
+ to the docstring of this method for more information.
+ """
+ return self.tokenizer.batch_decode(*args, **kwargs)
+
+ def decode(self, *args, **kwargs):
+ """
+ This method forwards all its arguments to SpeechT5Tokenizer's [`~SpeechT5Tokenizer.decode`]. Please refer to
+ the docstring of this method for more information.
+ """
+ return self.tokenizer.decode(*args, **kwargs)
diff --git a/venv/lib/python3.10/site-packages/transformers/models/speecht5/tokenization_speecht5.py b/venv/lib/python3.10/site-packages/transformers/models/speecht5/tokenization_speecht5.py
new file mode 100644
index 0000000000000000000000000000000000000000..41cb296f8f0d08184b1bb2576d066d62ebf74d63
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/speecht5/tokenization_speecht5.py
@@ -0,0 +1,219 @@
+# coding=utf-8
+# Copyright 2023 The Facebook Inc. and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Tokenization class for SpeechT5."""
+
+
+import os
+from shutil import copyfile
+from typing import Any, Dict, List, Optional, Tuple
+
+import sentencepiece as spm
+
+from ...tokenization_utils import PreTrainedTokenizer
+from ...utils import logging
+from .number_normalizer import EnglishNumberNormalizer
+
+
+logger = logging.get_logger(__name__)
+
+VOCAB_FILES_NAMES = {"vocab_file": "spm_char.model"}
+
+
+class SpeechT5Tokenizer(PreTrainedTokenizer):
+ """
+ Construct a SpeechT5 tokenizer. Based on [SentencePiece](https://github.com/google/sentencepiece).
+
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
+ this superclass for more information regarding those methods.
+
+ Args:
+ vocab_file (`str`):
+ [SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that
+ contains the vocabulary necessary to instantiate a tokenizer.
+        bos_token (`str`, *optional*, defaults to `"<s>"`):
+            The beginning of sequence token.
+        eos_token (`str`, *optional*, defaults to `"</s>"`):
+            The end of sequence token.
+        unk_token (`str`, *optional*, defaults to `"<unk>"`):
+            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be
+            this token instead.
+        pad_token (`str`, *optional*, defaults to `"<pad>"`):
+            The token used for padding, for example when batching sequences of different lengths.
+        normalize (`bool`, *optional*, defaults to `False`):
+            Whether to convert numeric quantities in the text to their spelt-out English counterparts.
+ sp_model_kwargs (`dict`, *optional*):
+ Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for
+ SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things,
+ to set:
+
+ - `enable_sampling`: Enable subword regularization.
+ - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.
+
+ - `nbest_size = {0,1}`: No sampling is performed.
+ - `nbest_size > 1`: samples from the nbest_size results.
+            - `nbest_size < 0`: assuming that nbest_size is infinite and samples from all hypotheses (lattice)
+              using the forward-filtering-and-backward-sampling algorithm.
+
+ - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
+ BPE-dropout.
+
+ Attributes:
+ sp_model (`SentencePieceProcessor`):
+ The *SentencePiece* processor that is used for every conversion (string, tokens and IDs).
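+
+    Example:
+
+    A minimal sketch, assuming the `microsoft/speecht5_tts` checkpoint is available; the trailing id is the EOS
+    token appended by [`~SpeechT5Tokenizer.build_inputs_with_special_tokens`]:
+
+    ```python
+    >>> from transformers import SpeechT5Tokenizer
+
+    >>> tokenizer = SpeechT5Tokenizer.from_pretrained("microsoft/speecht5_tts")
+    >>> ids = tokenizer("Hello world")["input_ids"]
+    >>> ids[-1] == tokenizer.eos_token_id
+    True
+    ```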
+ """
+
+ vocab_files_names = VOCAB_FILES_NAMES
+ model_input_names = ["input_ids", "attention_mask"]
+
+ def __init__(
+ self,
+ vocab_file,
+        bos_token="<s>",
+        eos_token="</s>",
+        unk_token="<unk>",
+        pad_token="<pad>",
+ normalize=False,
+ sp_model_kwargs: Optional[Dict[str, Any]] = None,
+ **kwargs,
+ ) -> None:
+ self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
+ self.vocab_file = vocab_file
+ self.normalize = normalize
+ self._normalizer = None
+
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
+ self.sp_model.Load(vocab_file)
+
+ super().__init__(
+ bos_token=bos_token,
+ eos_token=eos_token,
+ unk_token=unk_token,
+ pad_token=pad_token,
+ normalize=normalize,
+ sp_model_kwargs=self.sp_model_kwargs,
+ **kwargs,
+ )
+
+ def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
+ normalize = kwargs.pop("normalize", self.normalize)
+ if is_split_into_words:
+ text = " " + text
+ if normalize:
+ text = self.normalizer(text)
+ return (text, kwargs)
+
+ @property
+ def vocab_size(self):
+ return self.sp_model.get_piece_size()
+
+ @property
+ def normalizer(self):
+ if self._normalizer is None:
+ self._normalizer = EnglishNumberNormalizer()
+ return self._normalizer
+
+ @normalizer.setter
+ def normalizer(self, value):
+ self._normalizer = value
+
+ def get_vocab(self):
+ vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
+ vocab.update(self.added_tokens_encoder)
+ return vocab
+
+ def __getstate__(self):
+ state = self.__dict__.copy()
+ state["sp_model"] = None
+ return state
+
+ def __setstate__(self, d):
+ self.__dict__ = d
+
+ # for backward compatibility
+ if not hasattr(self, "sp_model_kwargs"):
+ self.sp_model_kwargs = {}
+
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
+ self.sp_model.Load(self.vocab_file)
+
+ def _tokenize(self, text: str) -> List[str]:
+ """Take as input a string and return a list of strings (tokens) for words/sub-words"""
+ return self.sp_model.encode(text, out_type=str)
+
+ def _convert_token_to_id(self, token):
+ """Converts a token (str) in an id using the vocab."""
+ return self.sp_model.piece_to_id(token)
+
+ def _convert_id_to_token(self, index):
+ """Converts an index (integer) in a token (str) using the vocab."""
+ token = self.sp_model.IdToPiece(index)
+ return token
+
+ # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.convert_tokens_to_string
+ def convert_tokens_to_string(self, tokens):
+ """Converts a sequence of tokens (string) in a single string."""
+ current_sub_tokens = []
+ out_string = ""
+ prev_is_special = False
+ for token in tokens:
+ # make sure that special tokens are not decoded using sentencepiece model
+ if token in self.all_special_tokens:
+ if not prev_is_special:
+ out_string += " "
+ out_string += self.sp_model.decode(current_sub_tokens) + token
+ prev_is_special = True
+ current_sub_tokens = []
+ else:
+ current_sub_tokens.append(token)
+ prev_is_special = False
+ out_string += self.sp_model.decode(current_sub_tokens)
+ return out_string.strip()
+
+ def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
+ """Build model inputs from a sequence by appending eos_token_id."""
+ if token_ids_1 is None:
+ return token_ids_0 + [self.eos_token_id]
+ # We don't expect to process pairs, but leave the pair logic for API consistency
+ return token_ids_0 + token_ids_1 + [self.eos_token_id]
+
+ def get_special_tokens_mask(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
+ ) -> List[int]:
+ if already_has_special_tokens:
+ return super().get_special_tokens_mask(
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
+ )
+
+ suffix_ones = [1]
+ if token_ids_1 is None:
+ return ([0] * len(token_ids_0)) + suffix_ones
+ return ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones
+
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
+ if not os.path.isdir(save_directory):
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
+ return
+ out_vocab_file = os.path.join(
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
+ )
+
+ if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
+ copyfile(self.vocab_file, out_vocab_file)
+ elif not os.path.isfile(self.vocab_file):
+ with open(out_vocab_file, "wb") as fi:
+ content_spiece_model = self.sp_model.serialized_model_proto()
+ fi.write(content_spiece_model)
+
+ return (out_vocab_file,)