diff --git a/ckpts/universal/global_step20/zero/3.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt b/ckpts/universal/global_step20/zero/3.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt new file mode 100644 index 0000000000000000000000000000000000000000..70b1c897980f4bc5dacf24d6a94b02ce7ec8c611 --- /dev/null +++ b/ckpts/universal/global_step20/zero/3.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:46198f0d490fa17143187016e15d158ca84efcafea46ccfe5d91dda8e0b26bc3 +size 33555612 diff --git a/ckpts/universal/global_step20/zero/3.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt b/ckpts/universal/global_step20/zero/3.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..cf7666464f33a059a19414b649d24cd7f746a029 --- /dev/null +++ b/ckpts/universal/global_step20/zero/3.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ebfd91f176c4b8ce9b3795525983e33255e9a04480e63b4692d12b3abe0b7778 +size 33555627 diff --git a/ckpts/universal/global_step20/zero/5.post_attention_layernorm.weight/exp_avg.pt b/ckpts/universal/global_step20/zero/5.post_attention_layernorm.weight/exp_avg.pt new file mode 100644 index 0000000000000000000000000000000000000000..2cc2d1fbfa5af8ac27557cbdb65854eea345a3f7 --- /dev/null +++ b/ckpts/universal/global_step20/zero/5.post_attention_layernorm.weight/exp_avg.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3d6053a8dab8d5ed81b669e5569809ede3910dc2e615fcac85c2ca1957c4fc95 +size 9372 diff --git a/ckpts/universal/global_step20/zero/5.post_attention_layernorm.weight/exp_avg_sq.pt b/ckpts/universal/global_step20/zero/5.post_attention_layernorm.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..e7dced3d3f21d8f15c113f3b7854ff771f7cb518 --- /dev/null +++ b/ckpts/universal/global_step20/zero/5.post_attention_layernorm.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8082119865df759836867ddbfaf9152547cca6f03ddd4c61a268bd14aac036fc +size 9387 diff --git a/ckpts/universal/global_step20/zero/5.post_attention_layernorm.weight/fp32.pt b/ckpts/universal/global_step20/zero/5.post_attention_layernorm.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..fe4e47a5091d294e471d010c551505282ebc2ec7 --- /dev/null +++ b/ckpts/universal/global_step20/zero/5.post_attention_layernorm.weight/fp32.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7b67fa3a6d4f6079276d7d1aa7b8d931aea9838af0667abaf36483cc4eceafc8 +size 9293 diff --git a/ckpts/universal/global_step20/zero/9.attention.query_key_value.weight/exp_avg.pt b/ckpts/universal/global_step20/zero/9.attention.query_key_value.weight/exp_avg.pt new file mode 100644 index 0000000000000000000000000000000000000000..4b9cfab24cf13ee17aaab06178c89beae646bd44 --- /dev/null +++ b/ckpts/universal/global_step20/zero/9.attention.query_key_value.weight/exp_avg.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c7f9c4024e79471049640a1bc60425b091f525f1479a331ede254245f08d3b3d +size 50332828 diff --git a/lm-evaluation-harness/wandb/run-20240522_185944-8sj20j0r/files/requirements.txt b/lm-evaluation-harness/wandb/run-20240522_185944-8sj20j0r/files/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..a3573b3bbfd5b190fed4ccaed4ac2846002aec22 --- /dev/null +++ 
b/lm-evaluation-harness/wandb/run-20240522_185944-8sj20j0r/files/requirements.txt @@ -0,0 +1,155 @@ +DataProperty==1.0.1 +GitPython==3.1.43 +Jinja2==3.1.4 +Markdown==3.6 +MarkupSafe==2.1.5 +Pillow-SIMD==7.0.0.post3 +PyYAML==6.0 +Werkzeug==3.0.3 +absl-py==2.1.0 +accelerate==0.30.1 +aiohttp==3.9.5 +aiosignal==1.3.1 +async-timeout==4.0.3 +attrs==23.2.0 +av==9.2.0 +cachetools==5.3.3 +certifi==2024.2.2 +cffi==1.15.1 +cfgv==3.4.0 +chardet==5.2.0 +charset-normalizer==3.3.2 +click==8.1.7 +cmake==3.29.2 +colorama==0.4.6 +datasets==2.19.1 +deepspeed==0.12.4+hpu.synapse.v1.15.1 +dill==0.3.8 +distlib==0.3.8 +docker-pycreds==0.4.0 +einops==0.8.0 +evaluate==0.4.2 +exceptiongroup==1.2.1 +expecttest==0.2.1 +filelock==3.14.0 +frozenlist==1.4.1 +fsspec==2024.3.1 +gitdb==4.0.11 +google-auth-oauthlib==0.4.6 +google-auth==2.29.0 +grpcio==1.63.0 +habana-media-loader==1.15.1.15 +habana-pyhlml==1.15.1.15 +habana-torch-dataloader==1.15.1.15 +habana-torch-plugin==1.15.1.15 +habana_gpu_migration==1.15.1.15 +habana_quantization_toolkit==1.15.1.15 +hjson==3.1.0 +huggingface-hub==0.23.1 +identify==2.5.36 +idna==3.7 +iniconfig==2.0.0 +joblib==1.4.2 +jsonlines==4.0.0 +lightning-habana==1.4.0 +lightning-utilities==0.11.2 +lightning==2.2.0.post0 +lm_eval==0.4.2 +lm_eval==0.4.2 +lm_eval==0.4.2 +lxml==5.2.2 +mbstrdecoder==1.1.3 +more-itertools==10.2.0 +mpi4py==3.1.4 +mpmath==1.3.0 +multidict==6.0.5 +multiprocess==0.70.16 +networkx==3.3 +ninja==1.11.1.1 +nltk==3.8.1 +nodeenv==1.8.0 +numexpr==2.10.0 +numpy==1.23.5 +oauthlib==3.2.2 +packaging==24.0 +pandas==2.0.1 +pathspec==0.12.1 +pathvalidate==3.2.0 +peft==0.11.1 +perfetto==0.7.0 +pillow==10.3.0 +pip==22.0.2 +pip==23.3.1 +platformdirs==4.2.1 +pluggy==1.5.0 +portalocker==2.8.2 +pre-commit==3.3.3 +pretty-errors==1.2.25 +protobuf==3.20.3 +psutil==5.9.8 +py-cpuinfo==9.0.0 +pyarrow-hotfix==0.6 +pyarrow==16.1.0 +pyasn1==0.6.0 +pyasn1_modules==0.4.0 +pybind11==2.10.4 +pycparser==2.22 +pydantic==1.10.13 +pynvml==8.0.4 +pytablewriter==1.2.0 +pytest==8.2.0 +python-dateutil==2.9.0.post0 +pytorch-lightning==2.2.4 +pytz==2024.1 +regex==2023.5.5 +requests-oauthlib==2.0.0 +requests==2.31.0 +rouge_score==0.1.2 +rsa==4.9 +sacrebleu==2.4.2 +safetensors==0.4.3 +scikit-learn==1.5.0 +scipy==1.13.0 +sentencepiece==0.2.0 +sentry-sdk==2.2.1 +setproctitle==1.3.3 +setuptools==59.6.0 +setuptools==69.5.1 +six==1.16.0 +smmap==5.0.1 +sqlitedict==2.1.0 +symengine==0.11.0 +sympy==1.12 +tabledata==1.3.3 +tabulate==0.9.0 +tcolorpy==0.1.6 +tdqm==0.0.1 +tensorboard-data-server==0.6.1 +tensorboard-plugin-wit==1.8.1 +tensorboard==2.11.2 +threadpoolctl==3.5.0 +tokenizers==0.19.1 +tomli==2.0.1 +torch==2.2.0a0+git8964477 +torch_tb_profiler==0.4.0 +torchaudio==2.2.0+08901ad +torchdata==0.7.1+5e6f7b7 +torchmetrics==1.4.0 +torchtext==0.17.0+400da5c +torchvision==0.17.0+b2383d4 +tqdm-multiprocess==0.0.11 +tqdm==4.66.4 +transformers==4.41.0 +typepy==1.3.2 +typing_extensions==4.11.0 +tzdata==2024.1 +urllib3==1.26.18 +virtualenv==20.26.1 +wandb==0.17.0 +wheel==0.37.1 +wheel==0.43.0 +word2number==1.1 +xxhash==3.4.1 +yamllint==1.35.1 +yarl==1.9.4 +zstandard==0.22.0 \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240522_185944-8sj20j0r/files/wandb-summary.json b/lm-evaluation-harness/wandb/run-20240522_185944-8sj20j0r/files/wandb-summary.json new file mode 100644 index 0000000000000000000000000000000000000000..8bf99d152ad35c3699ec8600ecb8b169d4e35875 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240522_185944-8sj20j0r/files/wandb-summary.json @@ -0,0 +1 @@ +{"_wandb": {"runtime": 11}} \ No 
newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240522_185944-8sj20j0r/logs/debug-internal.log b/lm-evaluation-harness/wandb/run-20240522_185944-8sj20j0r/logs/debug-internal.log new file mode 100644 index 0000000000000000000000000000000000000000..0ca8649f4b37afa79be96336ed3ce93c8315c5db --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240522_185944-8sj20j0r/logs/debug-internal.log @@ -0,0 +1,182 @@ +2024-05-22 18:59:44,431 INFO StreamThr :4100 [internal.py:wandb_internal():85] W&B internal server running at pid: 4100, started at: 2024-05-22 18:59:44.429193 +2024-05-22 18:59:44,436 DEBUG HandlerThread:4100 [handler.py:handle_request():158] handle_request: status +2024-05-22 18:59:44,437 INFO WriterThread:4100 [datastore.py:open_for_write():87] open: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_185944-8sj20j0r/run-8sj20j0r.wandb +2024-05-22 18:59:44,439 DEBUG SenderThread:4100 [sender.py:send():378] send: header +2024-05-22 18:59:44,442 DEBUG SenderThread:4100 [sender.py:send():378] send: run +2024-05-22 18:59:44,700 INFO SenderThread:4100 [dir_watcher.py:__init__():211] watching files in: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_185944-8sj20j0r/files +2024-05-22 18:59:44,701 INFO SenderThread:4100 [sender.py:_start_run_threads():1123] run started: 8sj20j0r with start time 1716404384.42905 +2024-05-22 18:59:44,705 DEBUG HandlerThread:4100 [handler.py:handle_request():158] handle_request: check_version +2024-05-22 18:59:44,705 DEBUG SenderThread:4100 [sender.py:send_request():405] send_request: check_version +2024-05-22 18:59:44,826 DEBUG HandlerThread:4100 [handler.py:handle_request():158] handle_request: run_start +2024-05-22 18:59:44,828 DEBUG HandlerThread:4100 [system_info.py:__init__():26] System info init +2024-05-22 18:59:44,829 DEBUG HandlerThread:4100 [system_info.py:__init__():41] System info init done +2024-05-22 18:59:44,829 INFO HandlerThread:4100 [system_monitor.py:start():194] Starting system monitor +2024-05-22 18:59:44,829 INFO SystemMonitor:4100 [system_monitor.py:_start():158] Starting system asset monitoring threads +2024-05-22 18:59:44,829 INFO HandlerThread:4100 [system_monitor.py:probe():214] Collecting system info +2024-05-22 18:59:44,836 INFO SystemMonitor:4100 [interfaces.py:start():188] Started cpu monitoring +2024-05-22 18:59:44,836 INFO SystemMonitor:4100 [interfaces.py:start():188] Started disk monitoring +2024-05-22 18:59:44,842 INFO SystemMonitor:4100 [interfaces.py:start():188] Started memory monitoring +2024-05-22 18:59:44,842 INFO SystemMonitor:4100 [interfaces.py:start():188] Started network monitoring +2024-05-22 18:59:44,922 DEBUG HandlerThread:4100 [system_info.py:probe():150] Probing system +2024-05-22 18:59:44,926 DEBUG HandlerThread:4100 [system_info.py:_probe_git():135] Probing git +2024-05-22 18:59:44,937 ERROR HandlerThread:4100 [gitlib.py:root():92] git root error: Cmd('git') failed due to: exit code(128) + cmdline: git rev-parse --show-toplevel + stderr: 'fatal: detected dubious ownership in repository at '/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness' +To add an exception for this directory, call: + + git config --global --add safe.directory /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness' +2024-05-22 18:59:44,937 DEBUG HandlerThread:4100 [system_info.py:_probe_git():143] Probing git done +2024-05-22 18:59:44,937 DEBUG HandlerThread:4100 [system_info.py:probe():198] Probing system done +2024-05-22 18:59:44,937 DEBUG HandlerThread:4100 
[system_monitor.py:probe():223] {'os': 'Linux-5.15.0-92-generic-x86_64-with-glibc2.35', 'python': '3.10.12', 'heartbeatAt': '2024-05-22T18:59:44.922176', 'startedAt': '2024-05-22T18:59:44.408977', 'docker': None, 'cuda': None, 'args': ('--model', 'hf', '--model_args', 'pretrained=/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step4000', '--tasks', 'hellaswag,arc_easy,openbookqa,winogrande,sst2,mrpc', '--batch_size', 'auto', '--wandb_args', 'project=bharatgpt,group=trial_expt_2'), 'state': 'running', 'program': '-m lm_eval.__main__', 'codePathLocal': None, 'git': {'remote': 'https://github.com/EleutherAI/lm-evaluation-harness', 'commit': None}, 'email': None, 'root': '/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness', 'host': 'peacock-evaluation-worker-0', 'username': 'root', 'executable': '/usr/bin/python3', 'cpu_count': 80, 'cpu_count_logical': 160, 'cpu_freq': {'current': 2347.7606062500004, 'min': 800.0, 'max': 3400.0}, 'cpu_freq_per_core': [{'current': 3321.773, 'min': 800.0, 'max': 3400.0}, {'current': 3321.767, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3299.997, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, 
{'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.871, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, 
{'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}], 'disk': {'/': {'total': 877.6341285705566, 'used': 211.6415901184082}}, 'memory': {'total': 1007.4379997253418}} +2024-05-22 18:59:44,937 INFO HandlerThread:4100 [system_monitor.py:probe():224] Finished collecting system info +2024-05-22 18:59:44,937 INFO HandlerThread:4100 [system_monitor.py:probe():227] Publishing system info +2024-05-22 18:59:44,942 INFO HandlerThread:4100 [system_monitor.py:probe():229] Finished publishing system info +2024-05-22 18:59:44,948 DEBUG SenderThread:4100 [sender.py:send():378] send: files +2024-05-22 18:59:44,948 INFO SenderThread:4100 [sender.py:_save_file():1389] saving file wandb-metadata.json with policy now +2024-05-22 18:59:45,131 DEBUG HandlerThread:4100 [handler.py:handle_request():158] handle_request: python_packages +2024-05-22 18:59:45,131 DEBUG SenderThread:4100 [sender.py:send_request():405] send_request: python_packages +2024-05-22 18:59:45,132 DEBUG HandlerThread:4100 [handler.py:handle_request():158] handle_request: stop_status +2024-05-22 18:59:45,133 DEBUG SenderThread:4100 [sender.py:send_request():405] send_request: stop_status +2024-05-22 18:59:45,252 DEBUG SenderThread:4100 [sender.py:send():378] send: telemetry +2024-05-22 18:59:45,539 INFO wandb-upload_0:4100 [upload_job.py:push():130] Uploaded file /tmp/tmp2xnq2_a6wandb/qzh7qybp-wandb-metadata.json +2024-05-22 18:59:45,703 INFO Thread-12 :4100 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_185944-8sj20j0r/files/output.log +2024-05-22 18:59:45,703 INFO Thread-12 :4100 [dir_watcher.py:_on_file_created():271] file/dir created: 
/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_185944-8sj20j0r/files/requirements.txt +2024-05-22 18:59:45,703 INFO Thread-12 :4100 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_185944-8sj20j0r/files/wandb-metadata.json +2024-05-22 18:59:47,703 INFO Thread-12 :4100 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_185944-8sj20j0r/files/output.log +2024-05-22 18:59:50,258 DEBUG HandlerThread:4100 [handler.py:handle_request():158] handle_request: status_report +2024-05-22 18:59:55,628 DEBUG HandlerThread:4100 [handler.py:handle_request():158] handle_request: status_report +2024-05-22 18:59:55,711 INFO Thread-12 :4100 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_185944-8sj20j0r/files/output.log +2024-05-22 18:59:55,920 DEBUG SenderThread:4100 [sender.py:send():378] send: exit +2024-05-22 18:59:55,920 INFO SenderThread:4100 [sender.py:send_exit():585] handling exit code: 1 +2024-05-22 18:59:55,920 INFO SenderThread:4100 [sender.py:send_exit():587] handling runtime: 11 +2024-05-22 18:59:55,922 INFO SenderThread:4100 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end +2024-05-22 18:59:55,922 INFO SenderThread:4100 [sender.py:send_exit():593] send defer +2024-05-22 18:59:55,922 DEBUG HandlerThread:4100 [handler.py:handle_request():158] handle_request: defer +2024-05-22 18:59:55,922 INFO HandlerThread:4100 [handler.py:handle_request_defer():184] handle defer: 0 +2024-05-22 18:59:55,922 DEBUG SenderThread:4100 [sender.py:send_request():405] send_request: defer +2024-05-22 18:59:55,922 INFO SenderThread:4100 [sender.py:send_request_defer():609] handle sender defer: 0 +2024-05-22 18:59:55,923 INFO SenderThread:4100 [sender.py:transition_state():613] send defer: 1 +2024-05-22 18:59:55,923 DEBUG HandlerThread:4100 [handler.py:handle_request():158] handle_request: defer +2024-05-22 18:59:55,923 INFO HandlerThread:4100 [handler.py:handle_request_defer():184] handle defer: 1 +2024-05-22 18:59:55,923 DEBUG SenderThread:4100 [sender.py:send_request():405] send_request: defer +2024-05-22 18:59:55,923 INFO SenderThread:4100 [sender.py:send_request_defer():609] handle sender defer: 1 +2024-05-22 18:59:55,923 INFO SenderThread:4100 [sender.py:transition_state():613] send defer: 2 +2024-05-22 18:59:55,923 DEBUG HandlerThread:4100 [handler.py:handle_request():158] handle_request: defer +2024-05-22 18:59:55,923 INFO HandlerThread:4100 [handler.py:handle_request_defer():184] handle defer: 2 +2024-05-22 18:59:55,923 INFO HandlerThread:4100 [system_monitor.py:finish():203] Stopping system monitor +2024-05-22 18:59:55,923 DEBUG SystemMonitor:4100 [system_monitor.py:_start():172] Starting system metrics aggregation loop +2024-05-22 18:59:55,923 DEBUG SystemMonitor:4100 [system_monitor.py:_start():179] Finished system metrics aggregation loop +2024-05-22 18:59:55,923 DEBUG SystemMonitor:4100 [system_monitor.py:_start():183] Publishing last batch of metrics +2024-05-22 18:59:55,924 INFO HandlerThread:4100 [interfaces.py:finish():200] Joined cpu monitor +2024-05-22 18:59:55,924 INFO HandlerThread:4100 [interfaces.py:finish():200] Joined disk monitor +2024-05-22 18:59:55,924 INFO HandlerThread:4100 [interfaces.py:finish():200] Joined memory monitor +2024-05-22 18:59:55,924 INFO HandlerThread:4100 
[interfaces.py:finish():200] Joined network monitor +2024-05-22 18:59:55,924 DEBUG SenderThread:4100 [sender.py:send_request():405] send_request: defer +2024-05-22 18:59:55,924 INFO SenderThread:4100 [sender.py:send_request_defer():609] handle sender defer: 2 +2024-05-22 18:59:55,925 INFO SenderThread:4100 [sender.py:transition_state():613] send defer: 3 +2024-05-22 18:59:55,925 DEBUG HandlerThread:4100 [handler.py:handle_request():158] handle_request: defer +2024-05-22 18:59:55,925 INFO HandlerThread:4100 [handler.py:handle_request_defer():184] handle defer: 3 +2024-05-22 18:59:55,925 DEBUG SenderThread:4100 [sender.py:send_request():405] send_request: defer +2024-05-22 18:59:55,925 INFO SenderThread:4100 [sender.py:send_request_defer():609] handle sender defer: 3 +2024-05-22 18:59:55,925 INFO SenderThread:4100 [sender.py:transition_state():613] send defer: 4 +2024-05-22 18:59:55,925 DEBUG HandlerThread:4100 [handler.py:handle_request():158] handle_request: defer +2024-05-22 18:59:55,925 INFO HandlerThread:4100 [handler.py:handle_request_defer():184] handle defer: 4 +2024-05-22 18:59:55,925 DEBUG SenderThread:4100 [sender.py:send_request():405] send_request: defer +2024-05-22 18:59:55,925 INFO SenderThread:4100 [sender.py:send_request_defer():609] handle sender defer: 4 +2024-05-22 18:59:55,925 INFO SenderThread:4100 [sender.py:transition_state():613] send defer: 5 +2024-05-22 18:59:55,925 DEBUG HandlerThread:4100 [handler.py:handle_request():158] handle_request: defer +2024-05-22 18:59:55,925 INFO HandlerThread:4100 [handler.py:handle_request_defer():184] handle defer: 5 +2024-05-22 18:59:55,925 DEBUG SenderThread:4100 [sender.py:send():378] send: summary +2024-05-22 18:59:55,929 INFO SenderThread:4100 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end +2024-05-22 18:59:55,929 DEBUG SenderThread:4100 [sender.py:send_request():405] send_request: defer +2024-05-22 18:59:55,929 INFO SenderThread:4100 [sender.py:send_request_defer():609] handle sender defer: 5 +2024-05-22 18:59:55,929 INFO SenderThread:4100 [sender.py:transition_state():613] send defer: 6 +2024-05-22 18:59:55,929 DEBUG HandlerThread:4100 [handler.py:handle_request():158] handle_request: defer +2024-05-22 18:59:55,929 INFO HandlerThread:4100 [handler.py:handle_request_defer():184] handle defer: 6 +2024-05-22 18:59:55,929 DEBUG SenderThread:4100 [sender.py:send_request():405] send_request: defer +2024-05-22 18:59:55,930 INFO SenderThread:4100 [sender.py:send_request_defer():609] handle sender defer: 6 +2024-05-22 18:59:55,934 DEBUG HandlerThread:4100 [handler.py:handle_request():158] handle_request: status_report +2024-05-22 18:59:56,072 INFO SenderThread:4100 [sender.py:transition_state():613] send defer: 7 +2024-05-22 18:59:56,072 DEBUG HandlerThread:4100 [handler.py:handle_request():158] handle_request: defer +2024-05-22 18:59:56,072 INFO HandlerThread:4100 [handler.py:handle_request_defer():184] handle defer: 7 +2024-05-22 18:59:56,072 DEBUG SenderThread:4100 [sender.py:send_request():405] send_request: defer +2024-05-22 18:59:56,072 INFO SenderThread:4100 [sender.py:send_request_defer():609] handle sender defer: 7 +2024-05-22 18:59:56,712 INFO Thread-12 :4100 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_185944-8sj20j0r/files/config.yaml +2024-05-22 18:59:56,712 INFO Thread-12 :4100 [dir_watcher.py:_on_file_created():271] file/dir created: 
/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_185944-8sj20j0r/files/wandb-summary.json +2024-05-22 18:59:56,920 DEBUG HandlerThread:4100 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-22 18:59:57,280 INFO SenderThread:4100 [sender.py:transition_state():613] send defer: 8 +2024-05-22 18:59:57,281 DEBUG SenderThread:4100 [sender.py:send_request():405] send_request: poll_exit +2024-05-22 18:59:57,281 DEBUG HandlerThread:4100 [handler.py:handle_request():158] handle_request: defer +2024-05-22 18:59:57,281 INFO HandlerThread:4100 [handler.py:handle_request_defer():184] handle defer: 8 +2024-05-22 18:59:57,281 DEBUG SenderThread:4100 [sender.py:send_request():405] send_request: defer +2024-05-22 18:59:57,281 INFO SenderThread:4100 [sender.py:send_request_defer():609] handle sender defer: 8 +2024-05-22 18:59:57,281 INFO SenderThread:4100 [job_builder.py:build():432] Attempting to build job artifact +2024-05-22 18:59:57,282 INFO SenderThread:4100 [job_builder.py:_get_source_type():576] no source found +2024-05-22 18:59:57,282 INFO SenderThread:4100 [sender.py:transition_state():613] send defer: 9 +2024-05-22 18:59:57,282 DEBUG HandlerThread:4100 [handler.py:handle_request():158] handle_request: defer +2024-05-22 18:59:57,282 INFO HandlerThread:4100 [handler.py:handle_request_defer():184] handle defer: 9 +2024-05-22 18:59:57,282 DEBUG SenderThread:4100 [sender.py:send_request():405] send_request: defer +2024-05-22 18:59:57,282 INFO SenderThread:4100 [sender.py:send_request_defer():609] handle sender defer: 9 +2024-05-22 18:59:57,282 INFO SenderThread:4100 [dir_watcher.py:finish():358] shutting down directory watcher +2024-05-22 18:59:57,713 INFO SenderThread:4100 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_185944-8sj20j0r/files/output.log +2024-05-22 18:59:57,714 INFO SenderThread:4100 [dir_watcher.py:finish():388] scan: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_185944-8sj20j0r/files +2024-05-22 18:59:57,714 INFO SenderThread:4100 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_185944-8sj20j0r/files/config.yaml config.yaml +2024-05-22 18:59:57,714 INFO SenderThread:4100 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_185944-8sj20j0r/files/output.log output.log +2024-05-22 18:59:57,717 INFO SenderThread:4100 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_185944-8sj20j0r/files/requirements.txt requirements.txt +2024-05-22 18:59:57,717 INFO SenderThread:4100 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_185944-8sj20j0r/files/wandb-metadata.json wandb-metadata.json +2024-05-22 18:59:57,717 INFO SenderThread:4100 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_185944-8sj20j0r/files/wandb-summary.json wandb-summary.json +2024-05-22 18:59:57,717 INFO SenderThread:4100 [sender.py:transition_state():613] send defer: 10 +2024-05-22 18:59:57,717 DEBUG HandlerThread:4100 [handler.py:handle_request():158] handle_request: defer +2024-05-22 18:59:57,717 INFO HandlerThread:4100 [handler.py:handle_request_defer():184] handle defer: 10 +2024-05-22 18:59:57,719 DEBUG SenderThread:4100 [sender.py:send_request():405] 
send_request: defer +2024-05-22 18:59:57,719 INFO SenderThread:4100 [sender.py:send_request_defer():609] handle sender defer: 10 +2024-05-22 18:59:57,719 INFO SenderThread:4100 [file_pusher.py:finish():169] shutting down file pusher +2024-05-22 18:59:57,921 DEBUG HandlerThread:4100 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-22 18:59:57,921 DEBUG SenderThread:4100 [sender.py:send_request():405] send_request: poll_exit +2024-05-22 18:59:57,955 INFO wandb-upload_0:4100 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_185944-8sj20j0r/files/config.yaml +2024-05-22 18:59:58,393 INFO wandb-upload_1:4100 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_185944-8sj20j0r/files/output.log +2024-05-22 18:59:58,444 INFO wandb-upload_3:4100 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_185944-8sj20j0r/files/wandb-summary.json +2024-05-22 18:59:58,446 INFO wandb-upload_2:4100 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_185944-8sj20j0r/files/requirements.txt +2024-05-22 18:59:58,646 INFO Thread-11 (_thread_body):4100 [sender.py:transition_state():613] send defer: 11 +2024-05-22 18:59:58,646 DEBUG HandlerThread:4100 [handler.py:handle_request():158] handle_request: defer +2024-05-22 18:59:58,646 INFO HandlerThread:4100 [handler.py:handle_request_defer():184] handle defer: 11 +2024-05-22 18:59:58,646 DEBUG SenderThread:4100 [sender.py:send_request():405] send_request: defer +2024-05-22 18:59:58,646 INFO SenderThread:4100 [sender.py:send_request_defer():609] handle sender defer: 11 +2024-05-22 18:59:58,646 INFO SenderThread:4100 [file_pusher.py:join():175] waiting for file pusher +2024-05-22 18:59:58,647 INFO SenderThread:4100 [sender.py:transition_state():613] send defer: 12 +2024-05-22 18:59:58,647 DEBUG HandlerThread:4100 [handler.py:handle_request():158] handle_request: defer +2024-05-22 18:59:58,647 INFO HandlerThread:4100 [handler.py:handle_request_defer():184] handle defer: 12 +2024-05-22 18:59:58,647 DEBUG SenderThread:4100 [sender.py:send_request():405] send_request: defer +2024-05-22 18:59:58,647 INFO SenderThread:4100 [sender.py:send_request_defer():609] handle sender defer: 12 +2024-05-22 18:59:58,647 INFO SenderThread:4100 [file_stream.py:finish():601] file stream finish called +2024-05-22 18:59:58,708 INFO SenderThread:4100 [file_stream.py:finish():605] file stream finish is done +2024-05-22 18:59:58,709 INFO SenderThread:4100 [sender.py:transition_state():613] send defer: 13 +2024-05-22 18:59:58,709 DEBUG HandlerThread:4100 [handler.py:handle_request():158] handle_request: defer +2024-05-22 18:59:58,709 INFO HandlerThread:4100 [handler.py:handle_request_defer():184] handle defer: 13 +2024-05-22 18:59:58,709 DEBUG SenderThread:4100 [sender.py:send_request():405] send_request: defer +2024-05-22 18:59:58,709 INFO SenderThread:4100 [sender.py:send_request_defer():609] handle sender defer: 13 +2024-05-22 18:59:58,709 INFO SenderThread:4100 [sender.py:transition_state():613] send defer: 14 +2024-05-22 18:59:58,709 DEBUG HandlerThread:4100 [handler.py:handle_request():158] handle_request: defer +2024-05-22 18:59:58,709 INFO HandlerThread:4100 [handler.py:handle_request_defer():184] handle defer: 14 +2024-05-22 18:59:58,709 DEBUG SenderThread:4100 [sender.py:send():378] send: final +2024-05-22 18:59:58,709 
DEBUG SenderThread:4100 [sender.py:send():378] send: footer +2024-05-22 18:59:58,709 DEBUG SenderThread:4100 [sender.py:send_request():405] send_request: defer +2024-05-22 18:59:58,709 INFO SenderThread:4100 [sender.py:send_request_defer():609] handle sender defer: 14 +2024-05-22 18:59:58,710 DEBUG HandlerThread:4100 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-22 18:59:58,710 DEBUG HandlerThread:4100 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-22 18:59:58,710 DEBUG HandlerThread:4100 [handler.py:handle_request():158] handle_request: server_info +2024-05-22 18:59:58,710 DEBUG HandlerThread:4100 [handler.py:handle_request():158] handle_request: get_summary +2024-05-22 18:59:58,710 DEBUG HandlerThread:4100 [handler.py:handle_request():158] handle_request: sampled_history +2024-05-22 18:59:58,710 DEBUG HandlerThread:4100 [handler.py:handle_request():158] handle_request: internal_messages +2024-05-22 18:59:58,711 DEBUG SenderThread:4100 [sender.py:send_request():405] send_request: poll_exit +2024-05-22 18:59:58,711 DEBUG SenderThread:4100 [sender.py:send_request():405] send_request: poll_exit +2024-05-22 18:59:58,711 DEBUG SenderThread:4100 [sender.py:send_request():405] send_request: server_info +2024-05-22 18:59:58,774 INFO MainThread:4100 [wandb_run.py:_footer_history_summary_info():3994] rendering history +2024-05-22 18:59:58,774 INFO MainThread:4100 [wandb_run.py:_footer_history_summary_info():4026] rendering summary +2024-05-22 18:59:58,774 INFO MainThread:4100 [wandb_run.py:_footer_sync_info():3953] logging synced files +2024-05-22 18:59:58,774 DEBUG HandlerThread:4100 [handler.py:handle_request():158] handle_request: shutdown +2024-05-22 18:59:58,774 INFO HandlerThread:4100 [handler.py:finish():882] shutting down handler +2024-05-22 18:59:59,711 INFO WriterThread:4100 [datastore.py:close():296] close: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_185944-8sj20j0r/run-8sj20j0r.wandb +2024-05-22 18:59:59,774 INFO SenderThread:4100 [sender.py:finish():1545] shutting down sender +2024-05-22 18:59:59,774 INFO SenderThread:4100 [file_pusher.py:finish():169] shutting down file pusher +2024-05-22 18:59:59,774 INFO SenderThread:4100 [file_pusher.py:join():175] waiting for file pusher diff --git a/lm-evaluation-harness/wandb/run-20240522_185944-8sj20j0r/logs/debug.log b/lm-evaluation-harness/wandb/run-20240522_185944-8sj20j0r/logs/debug.log new file mode 100644 index 0000000000000000000000000000000000000000..9b7f8ef859bf4d453b95818174d0d88b53e2759c --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240522_185944-8sj20j0r/logs/debug.log @@ -0,0 +1,29 @@ +2024-05-22 18:59:44,423 INFO MainThread:3945 [wandb_setup.py:_flush():76] Current SDK version is 0.17.0 +2024-05-22 18:59:44,423 INFO MainThread:3945 [wandb_setup.py:_flush():76] Configure stats pid to 3945 +2024-05-22 18:59:44,423 INFO MainThread:3945 [wandb_setup.py:_flush():76] Loading settings from /root/.config/wandb/settings +2024-05-22 18:59:44,423 INFO MainThread:3945 [wandb_setup.py:_flush():76] Loading settings from /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/settings +2024-05-22 18:59:44,423 INFO MainThread:3945 [wandb_setup.py:_flush():76] Loading settings from environment variables: {} +2024-05-22 18:59:44,423 INFO MainThread:3945 [wandb_setup.py:_flush():76] Applying setup settings: {'_disable_service': False} +2024-05-22 18:59:44,423 WARNING MainThread:3945 [wandb_setup.py:_flush():76] Could not find program at -m lm_eval.__main__ 
+2024-05-22 18:59:44,423 INFO MainThread:3945 [wandb_setup.py:_flush():76] Inferring run settings from compute environment: {'program_relpath': None, 'program': '-m lm_eval.__main__'} +2024-05-22 18:59:44,423 INFO MainThread:3945 [wandb_setup.py:_flush():76] Applying login settings: {} +2024-05-22 18:59:44,423 INFO MainThread:3945 [wandb_init.py:_log_setup():520] Logging user logs to /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_185944-8sj20j0r/logs/debug.log +2024-05-22 18:59:44,423 INFO MainThread:3945 [wandb_init.py:_log_setup():521] Logging internal logs to /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_185944-8sj20j0r/logs/debug-internal.log +2024-05-22 18:59:44,423 INFO MainThread:3945 [wandb_init.py:init():560] calling init triggers +2024-05-22 18:59:44,423 INFO MainThread:3945 [wandb_init.py:init():567] wandb.init called with sweep_config: {} +config: {} +2024-05-22 18:59:44,423 INFO MainThread:3945 [wandb_init.py:init():610] starting backend +2024-05-22 18:59:44,424 INFO MainThread:3945 [wandb_init.py:init():614] setting up manager +2024-05-22 18:59:44,428 INFO MainThread:3945 [backend.py:_multiprocessing_setup():105] multiprocessing start_methods=fork,spawn,forkserver, using: spawn +2024-05-22 18:59:44,428 INFO MainThread:3945 [wandb_init.py:init():622] backend started and connected +2024-05-22 18:59:44,432 INFO MainThread:3945 [wandb_init.py:init():711] updated telemetry +2024-05-22 18:59:44,442 INFO MainThread:3945 [wandb_init.py:init():744] communicating run to backend with 90.0 second timeout +2024-05-22 18:59:44,704 INFO MainThread:3945 [wandb_run.py:_on_init():2396] communicating current version +2024-05-22 18:59:44,820 INFO MainThread:3945 [wandb_run.py:_on_init():2405] got version response +2024-05-22 18:59:44,820 INFO MainThread:3945 [wandb_init.py:init():795] starting run threads in backend +2024-05-22 18:59:45,132 INFO MainThread:3945 [wandb_run.py:_console_start():2374] atexit reg +2024-05-22 18:59:45,132 INFO MainThread:3945 [wandb_run.py:_redirect():2229] redirect: wrap_raw +2024-05-22 18:59:45,132 INFO MainThread:3945 [wandb_run.py:_redirect():2294] Wrapping output streams. +2024-05-22 18:59:45,132 INFO MainThread:3945 [wandb_run.py:_redirect():2319] Redirects installed. 
+2024-05-22 18:59:45,134 INFO MainThread:3945 [wandb_init.py:init():838] run started, returning control to user process +2024-05-22 18:59:59,775 WARNING MsgRouterThr:3945 [router.py:message_loop():77] message_loop has been closed diff --git a/lm-evaluation-harness/wandb/run-20240523_125300-yqqf3gci/files/config.yaml b/lm-evaluation-harness/wandb/run-20240523_125300-yqqf3gci/files/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..d3fce5e070a167a2e8421771f56f6fcedbaf5a0c --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_125300-yqqf3gci/files/config.yaml @@ -0,0 +1,43 @@ +wandb_version: 1 + +_wandb: + desc: null + value: + python_version: 3.10.12 + cli_version: 0.17.0 + framework: huggingface + huggingface_version: 4.41.1 + is_jupyter_run: false + is_kaggle_kernel: false + start_time: 1716468780 + t: + 1: + - 1 + - 5 + - 11 + - 49 + - 51 + - 53 + - 55 + - 71 + - 98 + - 100 + 2: + - 1 + - 5 + - 11 + - 49 + - 51 + - 53 + - 55 + - 71 + - 98 + - 100 + 3: + - 23 + 4: 3.10.12 + 5: 0.17.0 + 6: 4.41.1 + 8: + - 5 + 13: linux-x86_64 diff --git a/lm-evaluation-harness/wandb/run-20240523_125300-yqqf3gci/files/output.log b/lm-evaluation-harness/wandb/run-20240523_125300-yqqf3gci/files/output.log new file mode 100644 index 0000000000000000000000000000000000000000..1f51e78cca4782eccb9cfb55e47f8cbcc49902d9 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_125300-yqqf3gci/files/output.log @@ -0,0 +1,34 @@ + +2024-05-23:12:53:00,829 INFO [__main__.py:251] Verbosity set to INFO +2024-05-23:12:53:10,418 INFO [__main__.py:335] Selected Tasks: ['arc_easy', 'hellaswag', 'mrpc', 'openbookqa', 'sst2', 'winogrande'] +2024-05-23:12:53:10,422 INFO [evaluator.py:131] Setting random seed to 0 | Setting numpy seed to 1234 | Setting torch manual seed to 1234 +2024-05-23:12:53:10,423 INFO [evaluator.py:177] Initializing hf model, with arguments: {'pretrained': '/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step100'} +2024-05-23:12:53:12,724 INFO [huggingface.py:164] Using device 'cuda' +Traceback (most recent call last): + File "/usr/lib/python3.10/runpy.py", line 196, in _run_module_as_main + return _run_code(code, main_globals, None, + File "/usr/lib/python3.10/runpy.py", line 86, in _run_code + exec(code, run_globals) + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 417, in + cli_evaluate() + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 341, in cli_evaluate + results = evaluator.simple_evaluate( + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/utils.py", line 288, in _wrapper + return fn(*args, **kwargs) + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/evaluator.py", line 180, in simple_evaluate + lm = lm_eval.api.registry.get_model(model).create_from_arg_string( + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/api/model.py", line 134, in create_from_arg_string + return cls(**args, **args2) + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 190, in __init__ + self._get_config( + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 471, in _get_config + self._config = transformers.AutoConfig.from_pretrained( + File "/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py", line 934, in from_pretrained + config_dict, unused_kwargs = 
PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) + File "/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 632, in get_config_dict + config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) + File "/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 689, in _get_config_dict + resolved_config_file = cached_file( + File "/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py", line 370, in cached_file + raise EnvironmentError( +OSError: /mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step100 does not appear to have a file named config.json. Checkout 'https://huggingface.co//mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step100/tree/main' for available files. \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240523_125300-yqqf3gci/files/requirements.txt b/lm-evaluation-harness/wandb/run-20240523_125300-yqqf3gci/files/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..f675c3016b5332c1acf28f436e0b60adeead9c12 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_125300-yqqf3gci/files/requirements.txt @@ -0,0 +1,155 @@ +DataProperty==1.0.1 +GitPython==3.1.43 +Jinja2==3.1.4 +Markdown==3.6 +MarkupSafe==2.1.5 +Pillow-SIMD==7.0.0.post3 +PyYAML==6.0 +Werkzeug==3.0.3 +absl-py==2.1.0 +accelerate==0.30.1 +aiohttp==3.9.5 +aiosignal==1.3.1 +async-timeout==4.0.3 +attrs==23.2.0 +av==9.2.0 +cachetools==5.3.3 +certifi==2024.2.2 +cffi==1.15.1 +cfgv==3.4.0 +chardet==5.2.0 +charset-normalizer==3.3.2 +click==8.1.7 +cmake==3.29.2 +colorama==0.4.6 +datasets==2.19.1 +deepspeed==0.12.4+hpu.synapse.v1.15.1 +dill==0.3.8 +distlib==0.3.8 +docker-pycreds==0.4.0 +einops==0.8.0 +evaluate==0.4.2 +exceptiongroup==1.2.1 +expecttest==0.2.1 +filelock==3.14.0 +frozenlist==1.4.1 +fsspec==2024.3.1 +gitdb==4.0.11 +google-auth-oauthlib==0.4.6 +google-auth==2.29.0 +grpcio==1.63.0 +habana-media-loader==1.15.1.15 +habana-pyhlml==1.15.1.15 +habana-torch-dataloader==1.15.1.15 +habana-torch-plugin==1.15.1.15 +habana_gpu_migration==1.15.1.15 +habana_quantization_toolkit==1.15.1.15 +hjson==3.1.0 +huggingface-hub==0.23.1 +identify==2.5.36 +idna==3.7 +iniconfig==2.0.0 +joblib==1.4.2 +jsonlines==4.0.0 +lightning-habana==1.4.0 +lightning-utilities==0.11.2 +lightning==2.2.0.post0 +lm_eval==0.4.2 +lm_eval==0.4.2 +lm_eval==0.4.2 +lxml==5.2.2 +mbstrdecoder==1.1.3 +more-itertools==10.2.0 +mpi4py==3.1.4 +mpmath==1.3.0 +multidict==6.0.5 +multiprocess==0.70.16 +networkx==3.3 +ninja==1.11.1.1 +nltk==3.8.1 +nodeenv==1.8.0 +numexpr==2.10.0 +numpy==1.23.5 +oauthlib==3.2.2 +packaging==24.0 +pandas==2.0.1 +pathspec==0.12.1 +pathvalidate==3.2.0 +peft==0.11.1 +perfetto==0.7.0 +pillow==10.3.0 +pip==22.0.2 +pip==23.3.1 +platformdirs==4.2.1 +pluggy==1.5.0 +portalocker==2.8.2 +pre-commit==3.3.3 +pretty-errors==1.2.25 +protobuf==3.20.3 +psutil==5.9.8 +py-cpuinfo==9.0.0 +pyarrow-hotfix==0.6 +pyarrow==16.1.0 +pyasn1==0.6.0 +pyasn1_modules==0.4.0 +pybind11==2.10.4 +pycparser==2.22 +pydantic==1.10.13 +pynvml==8.0.4 +pytablewriter==1.2.0 +pytest==8.2.0 +python-dateutil==2.9.0.post0 +pytorch-lightning==2.2.4 +pytz==2024.1 +regex==2023.5.5 +requests-oauthlib==2.0.0 +requests==2.31.0 +rouge_score==0.1.2 +rsa==4.9 +sacrebleu==2.4.2 +safetensors==0.4.3 +scikit-learn==1.5.0 +scipy==1.13.1 +sentencepiece==0.2.0 +sentry-sdk==2.3.0 +setproctitle==1.3.3 +setuptools==59.6.0 +setuptools==69.5.1 +six==1.16.0 +smmap==5.0.1 
+sqlitedict==2.1.0 +symengine==0.11.0 +sympy==1.12 +tabledata==1.3.3 +tabulate==0.9.0 +tcolorpy==0.1.6 +tdqm==0.0.1 +tensorboard-data-server==0.6.1 +tensorboard-plugin-wit==1.8.1 +tensorboard==2.11.2 +threadpoolctl==3.5.0 +tokenizers==0.19.1 +tomli==2.0.1 +torch==2.2.0a0+git8964477 +torch_tb_profiler==0.4.0 +torchaudio==2.2.0+08901ad +torchdata==0.7.1+5e6f7b7 +torchmetrics==1.4.0 +torchtext==0.17.0+400da5c +torchvision==0.17.0+b2383d4 +tqdm-multiprocess==0.0.11 +tqdm==4.66.4 +transformers==4.41.1 +typepy==1.3.2 +typing_extensions==4.11.0 +tzdata==2024.1 +urllib3==1.26.18 +virtualenv==20.26.1 +wandb==0.17.0 +wheel==0.37.1 +wheel==0.43.0 +word2number==1.1 +xxhash==3.4.1 +yamllint==1.35.1 +yarl==1.9.4 +zstandard==0.22.0 \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240523_125300-yqqf3gci/files/wandb-metadata.json b/lm-evaluation-harness/wandb/run-20240523_125300-yqqf3gci/files/wandb-metadata.json new file mode 100644 index 0000000000000000000000000000000000000000..ee8cc65e30db48ccf7a321bbdbffffa21bcc6caa --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_125300-yqqf3gci/files/wandb-metadata.json @@ -0,0 +1,850 @@ +{ + "os": "Linux-5.15.0-92-generic-x86_64-with-glibc2.35", + "python": "3.10.12", + "heartbeatAt": "2024-05-23T12:53:00.613604", + "startedAt": "2024-05-23T12:53:00.074192", + "docker": null, + "cuda": null, + "args": [ + "--model", + "hf", + "--model_args", + "pretrained=/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step100", + "--tasks", + "hellaswag,arc_easy,openbookqa,winogrande,sst2,mrpc", + "--batch_size", + "auto", + "--wandb_args", + "project=bharatgpt,group=trial_expt_2" + ], + "state": "running", + "program": "-m lm_eval.__main__", + "codePathLocal": null, + "git": { + "remote": "https://github.com/EleutherAI/lm-evaluation-harness", + "commit": null + }, + "email": null, + "root": "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness", + "host": "peacock-evaluation-worker-0", + "username": "root", + "executable": "/usr/bin/python3", + "cpu_count": 80, + "cpu_count_logical": 160, + "cpu_freq": { + "current": 2327.20563125, + "min": 800.0, + "max": 3400.0 + }, + "cpu_freq_per_core": [ + { + "current": 3399.997, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + 
"max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + 
{ + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 
2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + } + ], + "disk": { + "/": { + "total": 877.6341285705566, + "used": 211.58670043945312 + } + }, + "memory": { + "total": 1007.4379348754883 + } +} diff --git a/lm-evaluation-harness/wandb/run-20240523_125300-yqqf3gci/files/wandb-summary.json b/lm-evaluation-harness/wandb/run-20240523_125300-yqqf3gci/files/wandb-summary.json new file mode 100644 index 0000000000000000000000000000000000000000..018b4068e81a342faa2a3b0691dee6965106bcc3 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_125300-yqqf3gci/files/wandb-summary.json @@ -0,0 +1 @@ +{"_wandb": {"runtime": 12}} \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240523_125300-yqqf3gci/logs/debug-internal.log b/lm-evaluation-harness/wandb/run-20240523_125300-yqqf3gci/logs/debug-internal.log new file mode 100644 index 0000000000000000000000000000000000000000..168a3bc9dcf5c2c73cd7d41ece48ca38cfa95aa7 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_125300-yqqf3gci/logs/debug-internal.log @@ -0,0 +1,183 @@ +2024-05-23 12:53:00,098 INFO StreamThr :804 [internal.py:wandb_internal():85] W&B internal server running at pid: 804, started at: 2024-05-23 12:53:00.094550 +2024-05-23 12:53:00,100 DEBUG HandlerThread:804 [handler.py:handle_request():158] handle_request: status +2024-05-23 12:53:00,103 INFO WriterThread:804 [datastore.py:open_for_write():87] open: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_125300-yqqf3gci/run-yqqf3gci.wandb +2024-05-23 12:53:00,105 DEBUG SenderThread:804 [sender.py:send():378] send: header +2024-05-23 12:53:00,107 DEBUG SenderThread:804 [sender.py:send():378] send: run +2024-05-23 12:53:00,399 INFO SenderThread:804 [dir_watcher.py:__init__():211] watching files in: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_125300-yqqf3gci/files +2024-05-23 12:53:00,400 INFO SenderThread:804 [sender.py:_start_run_threads():1123] run started: yqqf3gci with start time 1716468780.094401 +2024-05-23 12:53:00,406 DEBUG HandlerThread:804 [handler.py:handle_request():158] handle_request: 
check_version +2024-05-23 12:53:00,406 DEBUG SenderThread:804 [sender.py:send_request():405] send_request: check_version +2024-05-23 12:53:00,519 DEBUG HandlerThread:804 [handler.py:handle_request():158] handle_request: run_start +2024-05-23 12:53:00,522 DEBUG HandlerThread:804 [system_info.py:__init__():26] System info init +2024-05-23 12:53:00,522 DEBUG HandlerThread:804 [system_info.py:__init__():41] System info init done +2024-05-23 12:53:00,522 INFO HandlerThread:804 [system_monitor.py:start():194] Starting system monitor +2024-05-23 12:53:00,522 INFO SystemMonitor:804 [system_monitor.py:_start():158] Starting system asset monitoring threads +2024-05-23 12:53:00,522 INFO HandlerThread:804 [system_monitor.py:probe():214] Collecting system info +2024-05-23 12:53:00,529 INFO SystemMonitor:804 [interfaces.py:start():188] Started cpu monitoring +2024-05-23 12:53:00,529 INFO SystemMonitor:804 [interfaces.py:start():188] Started disk monitoring +2024-05-23 12:53:00,530 INFO SystemMonitor:804 [interfaces.py:start():188] Started memory monitoring +2024-05-23 12:53:00,532 INFO SystemMonitor:804 [interfaces.py:start():188] Started network monitoring +2024-05-23 12:53:00,613 DEBUG HandlerThread:804 [system_info.py:probe():150] Probing system +2024-05-23 12:53:00,616 DEBUG HandlerThread:804 [system_info.py:_probe_git():135] Probing git +2024-05-23 12:53:00,628 ERROR HandlerThread:804 [gitlib.py:root():92] git root error: Cmd('git') failed due to: exit code(128) + cmdline: git rev-parse --show-toplevel + stderr: 'fatal: detected dubious ownership in repository at '/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness' +To add an exception for this directory, call: + + git config --global --add safe.directory /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness' +2024-05-23 12:53:00,628 DEBUG HandlerThread:804 [system_info.py:_probe_git():143] Probing git done +2024-05-23 12:53:00,628 DEBUG HandlerThread:804 [system_info.py:probe():198] Probing system done +2024-05-23 12:53:00,628 DEBUG HandlerThread:804 [system_monitor.py:probe():223] {'os': 'Linux-5.15.0-92-generic-x86_64-with-glibc2.35', 'python': '3.10.12', 'heartbeatAt': '2024-05-23T12:53:00.613604', 'startedAt': '2024-05-23T12:53:00.074192', 'docker': None, 'cuda': None, 'args': ('--model', 'hf', '--model_args', 'pretrained=/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step100', '--tasks', 'hellaswag,arc_easy,openbookqa,winogrande,sst2,mrpc', '--batch_size', 'auto', '--wandb_args', 'project=bharatgpt,group=trial_expt_2'), 'state': 'running', 'program': '-m lm_eval.__main__', 'codePathLocal': None, 'git': {'remote': 'https://github.com/EleutherAI/lm-evaluation-harness', 'commit': None}, 'email': None, 'root': '/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness', 'host': 'peacock-evaluation-worker-0', 'username': 'root', 'executable': '/usr/bin/python3', 'cpu_count': 80, 'cpu_count_logical': 160, 'cpu_freq': {'current': 2327.20563125, 'min': 800.0, 'max': 3400.0}, 'cpu_freq_per_core': [{'current': 3399.997, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 
800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 
800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 
800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}], 'disk': {'/': {'total': 877.6341285705566, 'used': 211.58670043945312}}, 'memory': {'total': 1007.4379348754883}} +2024-05-23 12:53:00,629 INFO HandlerThread:804 [system_monitor.py:probe():224] Finished collecting system info +2024-05-23 12:53:00,629 INFO HandlerThread:804 [system_monitor.py:probe():227] Publishing system info +2024-05-23 12:53:00,631 INFO HandlerThread:804 [system_monitor.py:probe():229] Finished publishing system info +2024-05-23 12:53:00,637 DEBUG SenderThread:804 [sender.py:send():378] send: files +2024-05-23 12:53:00,637 INFO SenderThread:804 [sender.py:_save_file():1389] saving file wandb-metadata.json with policy now +2024-05-23 12:53:00,822 DEBUG HandlerThread:804 [handler.py:handle_request():158] handle_request: python_packages +2024-05-23 12:53:00,823 DEBUG SenderThread:804 [sender.py:send_request():405] send_request: python_packages +2024-05-23 12:53:00,823 DEBUG HandlerThread:804 [handler.py:handle_request():158] handle_request: stop_status +2024-05-23 12:53:00,824 DEBUG SenderThread:804 [sender.py:send_request():405] send_request: stop_status +2024-05-23 12:53:00,970 DEBUG SenderThread:804 [sender.py:send():378] send: telemetry +2024-05-23 12:53:01,256 INFO wandb-upload_0:804 [upload_job.py:push():130] Uploaded file /tmp/tmpverirx0vwandb/s0a5dkg5-wandb-metadata.json +2024-05-23 12:53:01,403 INFO Thread-12 :804 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_125300-yqqf3gci/files/requirements.txt +2024-05-23 12:53:01,403 INFO Thread-12 :804 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_125300-yqqf3gci/files/output.log +2024-05-23 12:53:01,403 INFO Thread-12 :804 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_125300-yqqf3gci/files/wandb-metadata.json +2024-05-23 12:53:03,403 INFO Thread-12 :804 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_125300-yqqf3gci/files/output.log +2024-05-23 12:53:05,972 DEBUG HandlerThread:804 [handler.py:handle_request():158] handle_request: status_report +2024-05-23 12:53:11,410 INFO Thread-12 :804 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_125300-yqqf3gci/files/output.log +2024-05-23 12:53:11,424 DEBUG HandlerThread:804 [handler.py:handle_request():158] handle_request: status_report +2024-05-23 12:53:12,739 DEBUG SenderThread:804 [sender.py:send():378] send: exit +2024-05-23 12:53:12,739 INFO SenderThread:804 [sender.py:send_exit():585] handling exit code: 1 +2024-05-23 12:53:12,739 INFO SenderThread:804 [sender.py:send_exit():587] handling runtime: 12 +2024-05-23 12:53:12,740 INFO SenderThread:804 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end +2024-05-23 12:53:12,740 INFO SenderThread:804 [sender.py:send_exit():593] send defer +2024-05-23 12:53:12,741 DEBUG HandlerThread:804 [handler.py:handle_request():158] 
handle_request: defer +2024-05-23 12:53:12,741 INFO HandlerThread:804 [handler.py:handle_request_defer():184] handle defer: 0 +2024-05-23 12:53:12,741 DEBUG SenderThread:804 [sender.py:send_request():405] send_request: defer +2024-05-23 12:53:12,741 INFO SenderThread:804 [sender.py:send_request_defer():609] handle sender defer: 0 +2024-05-23 12:53:12,741 INFO SenderThread:804 [sender.py:transition_state():613] send defer: 1 +2024-05-23 12:53:12,741 DEBUG HandlerThread:804 [handler.py:handle_request():158] handle_request: defer +2024-05-23 12:53:12,741 INFO HandlerThread:804 [handler.py:handle_request_defer():184] handle defer: 1 +2024-05-23 12:53:12,741 DEBUG SenderThread:804 [sender.py:send_request():405] send_request: defer +2024-05-23 12:53:12,741 INFO SenderThread:804 [sender.py:send_request_defer():609] handle sender defer: 1 +2024-05-23 12:53:12,741 INFO SenderThread:804 [sender.py:transition_state():613] send defer: 2 +2024-05-23 12:53:12,741 DEBUG HandlerThread:804 [handler.py:handle_request():158] handle_request: defer +2024-05-23 12:53:12,741 INFO HandlerThread:804 [handler.py:handle_request_defer():184] handle defer: 2 +2024-05-23 12:53:12,741 INFO HandlerThread:804 [system_monitor.py:finish():203] Stopping system monitor +2024-05-23 12:53:12,741 DEBUG SystemMonitor:804 [system_monitor.py:_start():172] Starting system metrics aggregation loop +2024-05-23 12:53:12,741 DEBUG SystemMonitor:804 [system_monitor.py:_start():179] Finished system metrics aggregation loop +2024-05-23 12:53:12,742 DEBUG SystemMonitor:804 [system_monitor.py:_start():183] Publishing last batch of metrics +2024-05-23 12:53:12,742 INFO HandlerThread:804 [interfaces.py:finish():200] Joined cpu monitor +2024-05-23 12:53:12,745 INFO HandlerThread:804 [interfaces.py:finish():200] Joined disk monitor +2024-05-23 12:53:12,745 INFO HandlerThread:804 [interfaces.py:finish():200] Joined memory monitor +2024-05-23 12:53:12,745 INFO HandlerThread:804 [interfaces.py:finish():200] Joined network monitor +2024-05-23 12:53:12,746 DEBUG SenderThread:804 [sender.py:send_request():405] send_request: defer +2024-05-23 12:53:12,746 INFO SenderThread:804 [sender.py:send_request_defer():609] handle sender defer: 2 +2024-05-23 12:53:12,746 INFO SenderThread:804 [sender.py:transition_state():613] send defer: 3 +2024-05-23 12:53:12,746 DEBUG SenderThread:804 [sender.py:send():378] send: stats +2024-05-23 12:53:12,747 DEBUG HandlerThread:804 [handler.py:handle_request():158] handle_request: defer +2024-05-23 12:53:12,747 INFO HandlerThread:804 [handler.py:handle_request_defer():184] handle defer: 3 +2024-05-23 12:53:12,747 DEBUG SenderThread:804 [sender.py:send_request():405] send_request: defer +2024-05-23 12:53:12,747 INFO SenderThread:804 [sender.py:send_request_defer():609] handle sender defer: 3 +2024-05-23 12:53:12,747 INFO SenderThread:804 [sender.py:transition_state():613] send defer: 4 +2024-05-23 12:53:12,747 DEBUG HandlerThread:804 [handler.py:handle_request():158] handle_request: defer +2024-05-23 12:53:12,747 INFO HandlerThread:804 [handler.py:handle_request_defer():184] handle defer: 4 +2024-05-23 12:53:12,747 DEBUG SenderThread:804 [sender.py:send_request():405] send_request: defer +2024-05-23 12:53:12,747 INFO SenderThread:804 [sender.py:send_request_defer():609] handle sender defer: 4 +2024-05-23 12:53:12,747 INFO SenderThread:804 [sender.py:transition_state():613] send defer: 5 +2024-05-23 12:53:12,748 DEBUG HandlerThread:804 [handler.py:handle_request():158] handle_request: defer +2024-05-23 12:53:12,748 INFO 
HandlerThread:804 [handler.py:handle_request_defer():184] handle defer: 5 +2024-05-23 12:53:12,748 DEBUG SenderThread:804 [sender.py:send():378] send: summary +2024-05-23 12:53:12,748 INFO SenderThread:804 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end +2024-05-23 12:53:12,749 DEBUG SenderThread:804 [sender.py:send_request():405] send_request: defer +2024-05-23 12:53:12,749 INFO SenderThread:804 [sender.py:send_request_defer():609] handle sender defer: 5 +2024-05-23 12:53:12,749 INFO SenderThread:804 [sender.py:transition_state():613] send defer: 6 +2024-05-23 12:53:12,749 DEBUG HandlerThread:804 [handler.py:handle_request():158] handle_request: defer +2024-05-23 12:53:12,749 INFO HandlerThread:804 [handler.py:handle_request_defer():184] handle defer: 6 +2024-05-23 12:53:12,749 DEBUG SenderThread:804 [sender.py:send_request():405] send_request: defer +2024-05-23 12:53:12,749 INFO SenderThread:804 [sender.py:send_request_defer():609] handle sender defer: 6 +2024-05-23 12:53:12,754 DEBUG HandlerThread:804 [handler.py:handle_request():158] handle_request: status_report +2024-05-23 12:53:12,838 INFO SenderThread:804 [sender.py:transition_state():613] send defer: 7 +2024-05-23 12:53:12,838 DEBUG HandlerThread:804 [handler.py:handle_request():158] handle_request: defer +2024-05-23 12:53:12,838 INFO HandlerThread:804 [handler.py:handle_request_defer():184] handle defer: 7 +2024-05-23 12:53:12,839 DEBUG SenderThread:804 [sender.py:send_request():405] send_request: defer +2024-05-23 12:53:12,839 INFO SenderThread:804 [sender.py:send_request_defer():609] handle sender defer: 7 +2024-05-23 12:53:13,413 INFO Thread-12 :804 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_125300-yqqf3gci/files/config.yaml +2024-05-23 12:53:13,413 INFO Thread-12 :804 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_125300-yqqf3gci/files/wandb-summary.json +2024-05-23 12:53:13,739 DEBUG HandlerThread:804 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-23 12:53:14,996 INFO SenderThread:804 [sender.py:transition_state():613] send defer: 8 +2024-05-23 12:53:14,996 DEBUG SenderThread:804 [sender.py:send_request():405] send_request: poll_exit +2024-05-23 12:53:14,997 DEBUG HandlerThread:804 [handler.py:handle_request():158] handle_request: defer +2024-05-23 12:53:14,997 INFO HandlerThread:804 [handler.py:handle_request_defer():184] handle defer: 8 +2024-05-23 12:53:14,997 DEBUG SenderThread:804 [sender.py:send_request():405] send_request: defer +2024-05-23 12:53:14,997 INFO SenderThread:804 [sender.py:send_request_defer():609] handle sender defer: 8 +2024-05-23 12:53:14,997 INFO SenderThread:804 [job_builder.py:build():432] Attempting to build job artifact +2024-05-23 12:53:14,997 INFO SenderThread:804 [job_builder.py:_get_source_type():576] no source found +2024-05-23 12:53:14,998 INFO SenderThread:804 [sender.py:transition_state():613] send defer: 9 +2024-05-23 12:53:14,998 DEBUG HandlerThread:804 [handler.py:handle_request():158] handle_request: defer +2024-05-23 12:53:14,998 INFO HandlerThread:804 [handler.py:handle_request_defer():184] handle defer: 9 +2024-05-23 12:53:14,998 DEBUG SenderThread:804 [sender.py:send_request():405] send_request: defer +2024-05-23 12:53:14,998 INFO SenderThread:804 [sender.py:send_request_defer():609] handle sender defer: 9 +2024-05-23 12:53:14,998 INFO SenderThread:804 
[dir_watcher.py:finish():358] shutting down directory watcher +2024-05-23 12:53:15,415 INFO SenderThread:804 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_125300-yqqf3gci/files/output.log +2024-05-23 12:53:15,415 INFO SenderThread:804 [dir_watcher.py:finish():388] scan: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_125300-yqqf3gci/files +2024-05-23 12:53:15,415 INFO SenderThread:804 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_125300-yqqf3gci/files/config.yaml config.yaml +2024-05-23 12:53:15,416 INFO SenderThread:804 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_125300-yqqf3gci/files/output.log output.log +2024-05-23 12:53:15,418 INFO SenderThread:804 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_125300-yqqf3gci/files/requirements.txt requirements.txt +2024-05-23 12:53:15,418 INFO SenderThread:804 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_125300-yqqf3gci/files/wandb-metadata.json wandb-metadata.json +2024-05-23 12:53:15,418 INFO SenderThread:804 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_125300-yqqf3gci/files/wandb-summary.json wandb-summary.json +2024-05-23 12:53:15,419 INFO SenderThread:804 [sender.py:transition_state():613] send defer: 10 +2024-05-23 12:53:15,419 DEBUG HandlerThread:804 [handler.py:handle_request():158] handle_request: defer +2024-05-23 12:53:15,419 INFO HandlerThread:804 [handler.py:handle_request_defer():184] handle defer: 10 +2024-05-23 12:53:15,419 DEBUG SenderThread:804 [sender.py:send_request():405] send_request: defer +2024-05-23 12:53:15,419 INFO SenderThread:804 [sender.py:send_request_defer():609] handle sender defer: 10 +2024-05-23 12:53:15,419 INFO SenderThread:804 [file_pusher.py:finish():169] shutting down file pusher +2024-05-23 12:53:15,662 INFO wandb-upload_0:804 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_125300-yqqf3gci/files/config.yaml +2024-05-23 12:53:15,739 DEBUG HandlerThread:804 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-23 12:53:15,740 DEBUG SenderThread:804 [sender.py:send_request():405] send_request: poll_exit +2024-05-23 12:53:15,986 INFO wandb-upload_3:804 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_125300-yqqf3gci/files/wandb-summary.json +2024-05-23 12:53:16,023 INFO wandb-upload_1:804 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_125300-yqqf3gci/files/output.log +2024-05-23 12:53:16,031 INFO wandb-upload_2:804 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_125300-yqqf3gci/files/requirements.txt +2024-05-23 12:53:16,232 INFO Thread-11 (_thread_body):804 [sender.py:transition_state():613] send defer: 11 +2024-05-23 12:53:16,232 DEBUG HandlerThread:804 [handler.py:handle_request():158] handle_request: defer +2024-05-23 12:53:16,232 INFO HandlerThread:804 [handler.py:handle_request_defer():184] handle defer: 11 +2024-05-23 12:53:16,232 DEBUG SenderThread:804 
[sender.py:send_request():405] send_request: defer +2024-05-23 12:53:16,232 INFO SenderThread:804 [sender.py:send_request_defer():609] handle sender defer: 11 +2024-05-23 12:53:16,232 INFO SenderThread:804 [file_pusher.py:join():175] waiting for file pusher +2024-05-23 12:53:16,232 INFO SenderThread:804 [sender.py:transition_state():613] send defer: 12 +2024-05-23 12:53:16,233 DEBUG HandlerThread:804 [handler.py:handle_request():158] handle_request: defer +2024-05-23 12:53:16,233 INFO HandlerThread:804 [handler.py:handle_request_defer():184] handle defer: 12 +2024-05-23 12:53:16,233 DEBUG SenderThread:804 [sender.py:send_request():405] send_request: defer +2024-05-23 12:53:16,233 INFO SenderThread:804 [sender.py:send_request_defer():609] handle sender defer: 12 +2024-05-23 12:53:16,233 INFO SenderThread:804 [file_stream.py:finish():601] file stream finish called +2024-05-23 12:53:16,443 INFO SenderThread:804 [file_stream.py:finish():605] file stream finish is done +2024-05-23 12:53:16,443 INFO SenderThread:804 [sender.py:transition_state():613] send defer: 13 +2024-05-23 12:53:16,443 DEBUG HandlerThread:804 [handler.py:handle_request():158] handle_request: defer +2024-05-23 12:53:16,443 INFO HandlerThread:804 [handler.py:handle_request_defer():184] handle defer: 13 +2024-05-23 12:53:16,443 DEBUG SenderThread:804 [sender.py:send_request():405] send_request: defer +2024-05-23 12:53:16,443 INFO SenderThread:804 [sender.py:send_request_defer():609] handle sender defer: 13 +2024-05-23 12:53:16,443 INFO SenderThread:804 [sender.py:transition_state():613] send defer: 14 +2024-05-23 12:53:16,443 DEBUG HandlerThread:804 [handler.py:handle_request():158] handle_request: defer +2024-05-23 12:53:16,443 INFO HandlerThread:804 [handler.py:handle_request_defer():184] handle defer: 14 +2024-05-23 12:53:16,444 DEBUG SenderThread:804 [sender.py:send():378] send: final +2024-05-23 12:53:16,444 DEBUG SenderThread:804 [sender.py:send():378] send: footer +2024-05-23 12:53:16,444 DEBUG SenderThread:804 [sender.py:send_request():405] send_request: defer +2024-05-23 12:53:16,444 INFO SenderThread:804 [sender.py:send_request_defer():609] handle sender defer: 14 +2024-05-23 12:53:16,444 DEBUG HandlerThread:804 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-23 12:53:16,444 DEBUG SenderThread:804 [sender.py:send_request():405] send_request: poll_exit +2024-05-23 12:53:16,445 DEBUG HandlerThread:804 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-23 12:53:16,445 DEBUG HandlerThread:804 [handler.py:handle_request():158] handle_request: server_info +2024-05-23 12:53:16,445 DEBUG HandlerThread:804 [handler.py:handle_request():158] handle_request: get_summary +2024-05-23 12:53:16,445 DEBUG HandlerThread:804 [handler.py:handle_request():158] handle_request: sampled_history +2024-05-23 12:53:16,445 DEBUG HandlerThread:804 [handler.py:handle_request():158] handle_request: internal_messages +2024-05-23 12:53:16,445 DEBUG SenderThread:804 [sender.py:send_request():405] send_request: poll_exit +2024-05-23 12:53:16,446 DEBUG SenderThread:804 [sender.py:send_request():405] send_request: server_info +2024-05-23 12:53:16,507 INFO MainThread:804 [wandb_run.py:_footer_history_summary_info():3994] rendering history +2024-05-23 12:53:16,507 INFO MainThread:804 [wandb_run.py:_footer_history_summary_info():4026] rendering summary +2024-05-23 12:53:16,507 INFO MainThread:804 [wandb_run.py:_footer_sync_info():3953] logging synced files +2024-05-23 12:53:16,508 DEBUG HandlerThread:804 
[handler.py:handle_request():158] handle_request: shutdown +2024-05-23 12:53:16,508 INFO HandlerThread:804 [handler.py:finish():882] shutting down handler +2024-05-23 12:53:17,445 INFO WriterThread:804 [datastore.py:close():296] close: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_125300-yqqf3gci/run-yqqf3gci.wandb +2024-05-23 12:53:17,507 INFO SenderThread:804 [sender.py:finish():1545] shutting down sender +2024-05-23 12:53:17,507 INFO SenderThread:804 [file_pusher.py:finish():169] shutting down file pusher +2024-05-23 12:53:17,507 INFO SenderThread:804 [file_pusher.py:join():175] waiting for file pusher diff --git a/lm-evaluation-harness/wandb/run-20240523_125300-yqqf3gci/logs/debug.log b/lm-evaluation-harness/wandb/run-20240523_125300-yqqf3gci/logs/debug.log new file mode 100644 index 0000000000000000000000000000000000000000..d59251f200f6229423e7b9dc4edb7abd3f47f1b7 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_125300-yqqf3gci/logs/debug.log @@ -0,0 +1,29 @@ +2024-05-23 12:53:00,089 INFO MainThread:648 [wandb_setup.py:_flush():76] Current SDK version is 0.17.0 +2024-05-23 12:53:00,089 INFO MainThread:648 [wandb_setup.py:_flush():76] Configure stats pid to 648 +2024-05-23 12:53:00,089 INFO MainThread:648 [wandb_setup.py:_flush():76] Loading settings from /root/.config/wandb/settings +2024-05-23 12:53:00,089 INFO MainThread:648 [wandb_setup.py:_flush():76] Loading settings from /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/settings +2024-05-23 12:53:00,089 INFO MainThread:648 [wandb_setup.py:_flush():76] Loading settings from environment variables: {} +2024-05-23 12:53:00,089 INFO MainThread:648 [wandb_setup.py:_flush():76] Applying setup settings: {'_disable_service': False} +2024-05-23 12:53:00,089 WARNING MainThread:648 [wandb_setup.py:_flush():76] Could not find program at -m lm_eval.__main__ +2024-05-23 12:53:00,089 INFO MainThread:648 [wandb_setup.py:_flush():76] Inferring run settings from compute environment: {'program_relpath': None, 'program': '-m lm_eval.__main__'} +2024-05-23 12:53:00,089 INFO MainThread:648 [wandb_setup.py:_flush():76] Applying login settings: {} +2024-05-23 12:53:00,089 INFO MainThread:648 [wandb_init.py:_log_setup():520] Logging user logs to /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_125300-yqqf3gci/logs/debug.log +2024-05-23 12:53:00,089 INFO MainThread:648 [wandb_init.py:_log_setup():521] Logging internal logs to /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_125300-yqqf3gci/logs/debug-internal.log +2024-05-23 12:53:00,089 INFO MainThread:648 [wandb_init.py:init():560] calling init triggers +2024-05-23 12:53:00,090 INFO MainThread:648 [wandb_init.py:init():567] wandb.init called with sweep_config: {} +config: {} +2024-05-23 12:53:00,090 INFO MainThread:648 [wandb_init.py:init():610] starting backend +2024-05-23 12:53:00,090 INFO MainThread:648 [wandb_init.py:init():614] setting up manager +2024-05-23 12:53:00,093 INFO MainThread:648 [backend.py:_multiprocessing_setup():105] multiprocessing start_methods=fork,spawn,forkserver, using: spawn +2024-05-23 12:53:00,094 INFO MainThread:648 [wandb_init.py:init():622] backend started and connected +2024-05-23 12:53:00,097 INFO MainThread:648 [wandb_init.py:init():711] updated telemetry +2024-05-23 12:53:00,107 INFO MainThread:648 [wandb_init.py:init():744] communicating run to backend with 90.0 second timeout +2024-05-23 12:53:00,405 INFO MainThread:648 [wandb_run.py:_on_init():2396] communicating 
current version +2024-05-23 12:53:00,513 INFO MainThread:648 [wandb_run.py:_on_init():2405] got version response +2024-05-23 12:53:00,513 INFO MainThread:648 [wandb_init.py:init():795] starting run threads in backend +2024-05-23 12:53:00,824 INFO MainThread:648 [wandb_run.py:_console_start():2374] atexit reg +2024-05-23 12:53:00,824 INFO MainThread:648 [wandb_run.py:_redirect():2229] redirect: wrap_raw +2024-05-23 12:53:00,824 INFO MainThread:648 [wandb_run.py:_redirect():2294] Wrapping output streams. +2024-05-23 12:53:00,824 INFO MainThread:648 [wandb_run.py:_redirect():2319] Redirects installed. +2024-05-23 12:53:00,826 INFO MainThread:648 [wandb_init.py:init():838] run started, returning control to user process +2024-05-23 12:53:17,509 WARNING MsgRouterThr:648 [router.py:message_loop():77] message_loop has been closed diff --git a/lm-evaluation-harness/wandb/run-20240523_125300-yqqf3gci/run-yqqf3gci.wandb b/lm-evaluation-harness/wandb/run-20240523_125300-yqqf3gci/run-yqqf3gci.wandb new file mode 100644 index 0000000000000000000000000000000000000000..395b5e5967339a943274690e2cda0b4833f10d37 Binary files /dev/null and b/lm-evaluation-harness/wandb/run-20240523_125300-yqqf3gci/run-yqqf3gci.wandb differ diff --git a/lm-evaluation-harness/wandb/run-20240530_070447-fi4sos5j/files/config.yaml b/lm-evaluation-harness/wandb/run-20240530_070447-fi4sos5j/files/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..3a5daf9e45f4915eb0bc69466dc12fe8a54d6345 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240530_070447-fi4sos5j/files/config.yaml @@ -0,0 +1,44 @@ +wandb_version: 1 + +_wandb: + desc: null + value: + python_version: 3.10.12 + cli_version: 0.17.0 + framework: huggingface + huggingface_version: 4.36.2 + is_jupyter_run: false + is_kaggle_kernel: false + start_time: 1717052687 + t: + 1: + - 1 + - 5 + - 11 + - 49 + - 51 + - 53 + - 55 + - 71 + - 98 + - 100 + 2: + - 1 + - 5 + - 11 + - 49 + - 51 + - 53 + - 55 + - 71 + - 98 + - 100 + 3: + - 13 + - 23 + 4: 3.10.12 + 5: 0.17.0 + 6: 4.36.2 + 8: + - 5 + 13: linux-x86_64 diff --git a/lm-evaluation-harness/wandb/run-20240530_070447-fi4sos5j/files/output.log b/lm-evaluation-harness/wandb/run-20240530_070447-fi4sos5j/files/output.log new file mode 100644 index 0000000000000000000000000000000000000000..c865a19da2371fdcbd51a4572da18101a1e5f688 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240530_070447-fi4sos5j/files/output.log @@ -0,0 +1,38 @@ + +2024-05-30:07:04:48,090 INFO [__main__.py:251] Verbosity set to INFO +2024-05-30:07:04:57,969 INFO [__main__.py:335] Selected Tasks: ['arc_easy', 'boolq', 'copa', 'mrpc', 'piqa', 'sst2', 'winogrande'] +2024-05-30:07:04:57,971 INFO [evaluator.py:131] Setting random seed to 0 | Setting numpy seed to 1234 | Setting torch manual seed to 1234 +2024-05-30:07:04:57,971 INFO [evaluator.py:177] Initializing hf model, with arguments: {'pretrained': '/mnt/weka/peacock/experiments/llama/eval/checkpoint-english/llamav2-3b/hf/global_step70000', 'tokenizer': '/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k/'} +2024-05-30:07:05:00,271 INFO [huggingface.py:164] Using device 'cuda' +/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/gpu_migration/torch/cuda/memory.py:36: UserWarning: No need to call empty_cache on HPU. It manages the memory internally in an effcient way. 
+ warnings.warn( +Traceback (most recent call last): + File "/usr/lib/python3.10/runpy.py", line 196, in _run_module_as_main + return _run_code(code, main_globals, None, + File "/usr/lib/python3.10/runpy.py", line 86, in _run_code + exec(code, run_globals) + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 417, in <module> + cli_evaluate() + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 341, in cli_evaluate + results = evaluator.simple_evaluate( + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/utils.py", line 288, in _wrapper + return fn(*args, **kwargs) + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/evaluator.py", line 180, in simple_evaluate + lm = lm_eval.api.registry.get_model(model).create_from_arg_string( + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/api/model.py", line 134, in create_from_arg_string + return cls(**args, **args2) + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 237, in __init__ + self._create_tokenizer( + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 630, in _create_tokenizer + self.tokenizer = transformers.AutoTokenizer.from_pretrained( + File "/usr/local/lib/python3.10/dist-packages/transformers/models/auto/tokenization_auto.py", line 752, in from_pretrained + config = AutoConfig.from_pretrained( + File "/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py", line 1082, in from_pretrained + config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) + File "/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 644, in get_config_dict + config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) + File "/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 699, in _get_config_dict + resolved_config_file = cached_file( + File "/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py", line 360, in cached_file + raise EnvironmentError( +OSError: /mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k/ does not appear to have a file named config.json. Checkout 'https://huggingface.co//mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k//main' for available files.
\ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240530_070447-fi4sos5j/files/wandb-metadata.json b/lm-evaluation-harness/wandb/run-20240530_070447-fi4sos5j/files/wandb-metadata.json new file mode 100644 index 0000000000000000000000000000000000000000..1dcd303402ec1af91076156f2e80507df9698421 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240530_070447-fi4sos5j/files/wandb-metadata.json @@ -0,0 +1,850 @@ +{ + "os": "Linux-5.15.0-92-generic-x86_64-with-glibc2.35", + "python": "3.10.12", + "heartbeatAt": "2024-05-30T07:04:47.890522", + "startedAt": "2024-05-30T07:04:47.365134", + "docker": null, + "cuda": null, + "args": [ + "--model", + "hf", + "--model_args", + "pretrained=/mnt/weka/peacock/experiments/llama/eval/checkpoint-english/llamav2-3b/hf/global_step70000,tokenizer=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k/", + "--tasks", + "winogrande,sst2,mrpc,arc_easy,copa,piqa,boolq", + "--batch_size", + "auto", + "--wandb_args", + "project=english-eval,group=exp2,name=global_step70000" + ], + "state": "running", + "program": "-m lm_eval.__main__", + "codePathLocal": null, + "git": { + "remote": "https://github.com/EleutherAI/lm-evaluation-harness", + "commit": null + }, + "email": null, + "root": "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness", + "host": "peacock-evaluation-debug-35-worker-0", + "username": "root", + "executable": "/usr/bin/python3", + "cpu_count": 80, + "cpu_count_logical": 160, + "cpu_freq": { + "current": 2327.4101375, + "min": 800.0, + "max": 3400.0 + }, + "cpu_freq_per_core": [ + { + "current": 3399.997, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 
3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3399.997, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + 
"current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + 
"min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + } + ], + "disk": { + "/": { + "total": 877.6341285705566, + "used": 211.89148712158203 + } + }, + "memory": { + "total": 1007.4379920959473 + } +} diff --git a/lm-evaluation-harness/wandb/run-20240530_070447-fi4sos5j/logs/debug-internal.log b/lm-evaluation-harness/wandb/run-20240530_070447-fi4sos5j/logs/debug-internal.log new file mode 100644 index 0000000000000000000000000000000000000000..69d4fbf32cdd9bd715283063526672eb9f159b35 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240530_070447-fi4sos5j/logs/debug-internal.log @@ -0,0 +1,196 @@ +2024-05-30 07:04:47,387 INFO StreamThr :900 [internal.py:wandb_internal():85] W&B internal server running at pid: 900, started at: 2024-05-30 07:04:47.385483 +2024-05-30 07:04:47,391 DEBUG HandlerThread:900 [handler.py:handle_request():158] handle_request: status +2024-05-30 07:04:47,392 INFO WriterThread:900 [datastore.py:open_for_write():87] open: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_070447-fi4sos5j/run-fi4sos5j.wandb +2024-05-30 07:04:47,395 DEBUG SenderThread:900 [sender.py:send():378] send: header +2024-05-30 07:04:47,399 DEBUG SenderThread:900 [sender.py:send():378] send: run +2024-05-30 07:04:47,689 INFO SenderThread:900 [dir_watcher.py:__init__():211] watching files in: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_070447-fi4sos5j/files +2024-05-30 07:04:47,689 INFO SenderThread:900 [sender.py:_start_run_threads():1123] run started: fi4sos5j with start time 1717052687.38595 +2024-05-30 07:04:47,695 DEBUG HandlerThread:900 [handler.py:handle_request():158] handle_request: check_version +2024-05-30 07:04:47,696 DEBUG SenderThread:900 [sender.py:send_request():405] send_request: check_version +2024-05-30 07:04:47,815 DEBUG HandlerThread:900 [handler.py:handle_request():158] handle_request: run_start +2024-05-30 07:04:47,817 DEBUG HandlerThread:900 [system_info.py:__init__():26] System info init +2024-05-30 07:04:47,817 DEBUG HandlerThread:900 [system_info.py:__init__():41] System info init done +2024-05-30 07:04:47,817 INFO HandlerThread:900 [system_monitor.py:start():194] Starting system monitor +2024-05-30 07:04:47,818 INFO SystemMonitor:900 [system_monitor.py:_start():158] Starting system asset monitoring threads +2024-05-30 07:04:47,818 INFO HandlerThread:900 [system_monitor.py:probe():214] Collecting system info +2024-05-30 07:04:47,824 INFO SystemMonitor:900 [interfaces.py:start():188] Started cpu monitoring +2024-05-30 07:04:47,825 INFO SystemMonitor:900 [interfaces.py:start():188] Started disk monitoring +2024-05-30 07:04:47,828 INFO SystemMonitor:900 [interfaces.py:start():188] Started memory monitoring +2024-05-30 07:04:47,828 INFO 
SystemMonitor:900 [interfaces.py:start():188] Started network monitoring +2024-05-30 07:04:47,890 DEBUG HandlerThread:900 [system_info.py:probe():150] Probing system +2024-05-30 07:04:47,893 DEBUG HandlerThread:900 [system_info.py:_probe_git():135] Probing git +2024-05-30 07:04:47,903 ERROR HandlerThread:900 [gitlib.py:root():92] git root error: Cmd('git') failed due to: exit code(128) + cmdline: git rev-parse --show-toplevel + stderr: 'fatal: detected dubious ownership in repository at '/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness' +To add an exception for this directory, call: + + git config --global --add safe.directory /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness' +2024-05-30 07:04:47,903 DEBUG HandlerThread:900 [system_info.py:_probe_git():143] Probing git done +2024-05-30 07:04:47,903 DEBUG HandlerThread:900 [system_info.py:probe():198] Probing system done +2024-05-30 07:04:47,903 DEBUG HandlerThread:900 [system_monitor.py:probe():223] {'os': 'Linux-5.15.0-92-generic-x86_64-with-glibc2.35', 'python': '3.10.12', 'heartbeatAt': '2024-05-30T07:04:47.890522', 'startedAt': '2024-05-30T07:04:47.365134', 'docker': None, 'cuda': None, 'args': ('--model', 'hf', '--model_args', 'pretrained=/mnt/weka/peacock/experiments/llama/eval/checkpoint-english/llamav2-3b/hf/global_step70000,tokenizer=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k/', '--tasks', 'winogrande,sst2,mrpc,arc_easy,copa,piqa,boolq', '--batch_size', 'auto', '--wandb_args', 'project=english-eval,group=exp2,name=global_step70000'), 'state': 'running', 'program': '-m lm_eval.__main__', 'codePathLocal': None, 'git': {'remote': 'https://github.com/EleutherAI/lm-evaluation-harness', 'commit': None}, 'email': None, 'root': '/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness', 'host': 'peacock-evaluation-debug-35-worker-0', 'username': 'root', 'executable': '/usr/bin/python3', 'cpu_count': 80, 'cpu_count_logical': 160, 'cpu_freq': {'current': 2327.4101375, 'min': 800.0, 'max': 3400.0}, 'cpu_freq_per_core': [{'current': 3399.997, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 
'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3399.997, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 
'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}], 'disk': {'/': {'total': 877.6341285705566, 'used': 211.89148712158203}}, 'memory': {'total': 1007.4379920959473}} +2024-05-30 07:04:47,903 INFO HandlerThread:900 [system_monitor.py:probe():224] Finished collecting system info +2024-05-30 07:04:47,903 INFO HandlerThread:900 [system_monitor.py:probe():227] Publishing system info +2024-05-30 07:04:47,906 INFO HandlerThread:900 [system_monitor.py:probe():229] Finished publishing system info +2024-05-30 07:04:47,913 DEBUG SenderThread:900 [sender.py:send():378] send: files +2024-05-30 07:04:47,913 INFO SenderThread:900 [sender.py:_save_file():1389] saving file 
wandb-metadata.json with policy now +2024-05-30 07:04:48,084 DEBUG HandlerThread:900 [handler.py:handle_request():158] handle_request: python_packages +2024-05-30 07:04:48,084 DEBUG SenderThread:900 [sender.py:send_request():405] send_request: python_packages +2024-05-30 07:04:48,085 DEBUG HandlerThread:900 [handler.py:handle_request():158] handle_request: stop_status +2024-05-30 07:04:48,090 DEBUG SenderThread:900 [sender.py:send_request():405] send_request: stop_status +2024-05-30 07:04:48,246 DEBUG SenderThread:900 [sender.py:send():378] send: telemetry +2024-05-30 07:04:48,551 INFO wandb-upload_0:900 [upload_job.py:push():130] Uploaded file /tmp/tmpg7_ujvdqwandb/75nr9en3-wandb-metadata.json +2024-05-30 07:04:48,692 INFO Thread-12 :900 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_070447-fi4sos5j/files/output.log +2024-05-30 07:04:48,692 INFO Thread-12 :900 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_070447-fi4sos5j/files/wandb-metadata.json +2024-05-30 07:04:48,692 INFO Thread-12 :900 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_070447-fi4sos5j/files/requirements.txt +2024-05-30 07:04:50,691 INFO Thread-12 :900 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_070447-fi4sos5j/files/output.log +2024-05-30 07:04:53,250 DEBUG HandlerThread:900 [handler.py:handle_request():158] handle_request: status_report +2024-05-30 07:04:58,696 INFO Thread-12 :900 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_070447-fi4sos5j/files/output.log +2024-05-30 07:04:58,972 DEBUG HandlerThread:900 [handler.py:handle_request():158] handle_request: status_report +2024-05-30 07:05:02,714 INFO Thread-12 :900 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_070447-fi4sos5j/files/output.log +2024-05-30 07:05:03,086 DEBUG HandlerThread:900 [handler.py:handle_request():158] handle_request: stop_status +2024-05-30 07:05:03,086 DEBUG SenderThread:900 [sender.py:send_request():405] send_request: stop_status +2024-05-30 07:05:04,190 DEBUG HandlerThread:900 [handler.py:handle_request():158] handle_request: status_report +2024-05-30 07:05:09,190 DEBUG HandlerThread:900 [handler.py:handle_request():158] handle_request: status_report +2024-05-30 07:05:14,191 DEBUG HandlerThread:900 [handler.py:handle_request():158] handle_request: status_report +2024-05-30 07:05:18,086 DEBUG HandlerThread:900 [handler.py:handle_request():158] handle_request: stop_status +2024-05-30 07:05:18,087 DEBUG SenderThread:900 [sender.py:send_request():405] send_request: stop_status +2024-05-30 07:05:19,247 DEBUG HandlerThread:900 [handler.py:handle_request():158] handle_request: status_report +2024-05-30 07:05:19,759 INFO Thread-12 :900 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_070447-fi4sos5j/files/config.yaml +2024-05-30 07:05:25,032 DEBUG HandlerThread:900 [handler.py:handle_request():158] handle_request: status_report +2024-05-30 07:05:26,573 INFO Thread-12 :900 [dir_watcher.py:_on_file_modified():288] file/dir modified: 
/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_070447-fi4sos5j/files/output.log +2024-05-30 07:05:26,962 DEBUG SenderThread:900 [sender.py:send():378] send: exit +2024-05-30 07:05:26,962 INFO SenderThread:900 [sender.py:send_exit():585] handling exit code: 1 +2024-05-30 07:05:26,962 INFO SenderThread:900 [sender.py:send_exit():587] handling runtime: 39 +2024-05-30 07:05:26,963 INFO SenderThread:900 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end +2024-05-30 07:05:26,963 INFO SenderThread:900 [sender.py:send_exit():593] send defer +2024-05-30 07:05:26,964 DEBUG HandlerThread:900 [handler.py:handle_request():158] handle_request: defer +2024-05-30 07:05:26,964 INFO HandlerThread:900 [handler.py:handle_request_defer():184] handle defer: 0 +2024-05-30 07:05:26,964 DEBUG SenderThread:900 [sender.py:send_request():405] send_request: defer +2024-05-30 07:05:26,964 INFO SenderThread:900 [sender.py:send_request_defer():609] handle sender defer: 0 +2024-05-30 07:05:26,964 INFO SenderThread:900 [sender.py:transition_state():613] send defer: 1 +2024-05-30 07:05:26,964 DEBUG HandlerThread:900 [handler.py:handle_request():158] handle_request: defer +2024-05-30 07:05:26,964 INFO HandlerThread:900 [handler.py:handle_request_defer():184] handle defer: 1 +2024-05-30 07:05:26,964 DEBUG SenderThread:900 [sender.py:send_request():405] send_request: defer +2024-05-30 07:05:26,964 INFO SenderThread:900 [sender.py:send_request_defer():609] handle sender defer: 1 +2024-05-30 07:05:26,964 INFO SenderThread:900 [sender.py:transition_state():613] send defer: 2 +2024-05-30 07:05:26,964 DEBUG HandlerThread:900 [handler.py:handle_request():158] handle_request: defer +2024-05-30 07:05:26,964 INFO HandlerThread:900 [handler.py:handle_request_defer():184] handle defer: 2 +2024-05-30 07:05:26,964 INFO HandlerThread:900 [system_monitor.py:finish():203] Stopping system monitor +2024-05-30 07:05:26,964 DEBUG SystemMonitor:900 [system_monitor.py:_start():172] Starting system metrics aggregation loop +2024-05-30 07:05:26,964 DEBUG SystemMonitor:900 [system_monitor.py:_start():179] Finished system metrics aggregation loop +2024-05-30 07:05:26,964 DEBUG SystemMonitor:900 [system_monitor.py:_start():183] Publishing last batch of metrics +2024-05-30 07:05:26,967 INFO HandlerThread:900 [interfaces.py:finish():200] Joined cpu monitor +2024-05-30 07:05:26,968 INFO HandlerThread:900 [interfaces.py:finish():200] Joined disk monitor +2024-05-30 07:05:26,968 INFO HandlerThread:900 [interfaces.py:finish():200] Joined memory monitor +2024-05-30 07:05:26,968 INFO HandlerThread:900 [interfaces.py:finish():200] Joined network monitor +2024-05-30 07:05:26,968 DEBUG SenderThread:900 [sender.py:send_request():405] send_request: defer +2024-05-30 07:05:26,968 INFO SenderThread:900 [sender.py:send_request_defer():609] handle sender defer: 2 +2024-05-30 07:05:26,968 INFO SenderThread:900 [sender.py:transition_state():613] send defer: 3 +2024-05-30 07:05:26,968 DEBUG SenderThread:900 [sender.py:send():378] send: stats +2024-05-30 07:05:26,969 DEBUG HandlerThread:900 [handler.py:handle_request():158] handle_request: defer +2024-05-30 07:05:26,969 INFO HandlerThread:900 [handler.py:handle_request_defer():184] handle defer: 3 +2024-05-30 07:05:26,970 DEBUG SenderThread:900 [sender.py:send_request():405] send_request: defer +2024-05-30 07:05:26,970 INFO SenderThread:900 [sender.py:send_request_defer():609] handle sender defer: 3 +2024-05-30 07:05:26,970 INFO SenderThread:900 
[sender.py:transition_state():613] send defer: 4 +2024-05-30 07:05:26,970 DEBUG HandlerThread:900 [handler.py:handle_request():158] handle_request: defer +2024-05-30 07:05:26,970 INFO HandlerThread:900 [handler.py:handle_request_defer():184] handle defer: 4 +2024-05-30 07:05:26,970 DEBUG SenderThread:900 [sender.py:send_request():405] send_request: defer +2024-05-30 07:05:26,970 INFO SenderThread:900 [sender.py:send_request_defer():609] handle sender defer: 4 +2024-05-30 07:05:26,970 INFO SenderThread:900 [sender.py:transition_state():613] send defer: 5 +2024-05-30 07:05:26,970 DEBUG HandlerThread:900 [handler.py:handle_request():158] handle_request: defer +2024-05-30 07:05:26,970 INFO HandlerThread:900 [handler.py:handle_request_defer():184] handle defer: 5 +2024-05-30 07:05:26,970 DEBUG SenderThread:900 [sender.py:send():378] send: summary +2024-05-30 07:05:26,971 INFO SenderThread:900 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end +2024-05-30 07:05:26,971 DEBUG SenderThread:900 [sender.py:send_request():405] send_request: defer +2024-05-30 07:05:26,971 INFO SenderThread:900 [sender.py:send_request_defer():609] handle sender defer: 5 +2024-05-30 07:05:26,971 INFO SenderThread:900 [sender.py:transition_state():613] send defer: 6 +2024-05-30 07:05:26,971 DEBUG HandlerThread:900 [handler.py:handle_request():158] handle_request: defer +2024-05-30 07:05:26,971 INFO HandlerThread:900 [handler.py:handle_request_defer():184] handle defer: 6 +2024-05-30 07:05:26,971 DEBUG SenderThread:900 [sender.py:send_request():405] send_request: defer +2024-05-30 07:05:26,971 INFO SenderThread:900 [sender.py:send_request_defer():609] handle sender defer: 6 +2024-05-30 07:05:26,971 INFO SenderThread:900 [sender.py:transition_state():613] send defer: 7 +2024-05-30 07:05:26,972 DEBUG HandlerThread:900 [handler.py:handle_request():158] handle_request: status_report +2024-05-30 07:05:26,972 DEBUG HandlerThread:900 [handler.py:handle_request():158] handle_request: defer +2024-05-30 07:05:26,972 INFO HandlerThread:900 [handler.py:handle_request_defer():184] handle defer: 7 +2024-05-30 07:05:26,972 DEBUG SenderThread:900 [sender.py:send_request():405] send_request: defer +2024-05-30 07:05:26,972 INFO SenderThread:900 [sender.py:send_request_defer():609] handle sender defer: 7 +2024-05-30 07:05:27,574 INFO Thread-12 :900 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_070447-fi4sos5j/files/wandb-summary.json +2024-05-30 07:05:27,962 DEBUG HandlerThread:900 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-30 07:05:28,582 INFO SenderThread:900 [sender.py:transition_state():613] send defer: 8 +2024-05-30 07:05:28,582 DEBUG SenderThread:900 [sender.py:send_request():405] send_request: poll_exit +2024-05-30 07:05:28,582 DEBUG HandlerThread:900 [handler.py:handle_request():158] handle_request: defer +2024-05-30 07:05:28,582 INFO HandlerThread:900 [handler.py:handle_request_defer():184] handle defer: 8 +2024-05-30 07:05:28,582 INFO Thread-12 :900 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_070447-fi4sos5j/files/output.log +2024-05-30 07:05:28,582 DEBUG SenderThread:900 [sender.py:send_request():405] send_request: defer +2024-05-30 07:05:28,583 INFO SenderThread:900 [sender.py:send_request_defer():609] handle sender defer: 8 +2024-05-30 07:05:28,583 INFO SenderThread:900 [job_builder.py:build():432] 
Attempting to build job artifact +2024-05-30 07:05:28,583 INFO SenderThread:900 [job_builder.py:_get_source_type():576] no source found +2024-05-30 07:05:28,583 INFO SenderThread:900 [sender.py:transition_state():613] send defer: 9 +2024-05-30 07:05:28,583 DEBUG HandlerThread:900 [handler.py:handle_request():158] handle_request: defer +2024-05-30 07:05:28,583 INFO HandlerThread:900 [handler.py:handle_request_defer():184] handle defer: 9 +2024-05-30 07:05:28,584 DEBUG SenderThread:900 [sender.py:send_request():405] send_request: defer +2024-05-30 07:05:28,584 INFO SenderThread:900 [sender.py:send_request_defer():609] handle sender defer: 9 +2024-05-30 07:05:28,584 INFO SenderThread:900 [dir_watcher.py:finish():358] shutting down directory watcher +2024-05-30 07:05:28,962 DEBUG HandlerThread:900 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-30 07:05:29,583 INFO SenderThread:900 [dir_watcher.py:finish():388] scan: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_070447-fi4sos5j/files +2024-05-30 07:05:29,584 INFO SenderThread:900 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_070447-fi4sos5j/files/wandb-summary.json wandb-summary.json +2024-05-30 07:05:29,584 INFO SenderThread:900 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_070447-fi4sos5j/files/output.log output.log +2024-05-30 07:05:29,586 INFO SenderThread:900 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_070447-fi4sos5j/files/config.yaml config.yaml +2024-05-30 07:05:29,586 INFO SenderThread:900 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_070447-fi4sos5j/files/requirements.txt requirements.txt +2024-05-30 07:05:29,586 INFO SenderThread:900 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_070447-fi4sos5j/files/wandb-metadata.json wandb-metadata.json +2024-05-30 07:05:29,587 INFO SenderThread:900 [sender.py:transition_state():613] send defer: 10 +2024-05-30 07:05:29,587 DEBUG SenderThread:900 [sender.py:send_request():405] send_request: poll_exit +2024-05-30 07:05:29,587 DEBUG HandlerThread:900 [handler.py:handle_request():158] handle_request: defer +2024-05-30 07:05:29,587 INFO HandlerThread:900 [handler.py:handle_request_defer():184] handle defer: 10 +2024-05-30 07:05:29,587 DEBUG SenderThread:900 [sender.py:send_request():405] send_request: defer +2024-05-30 07:05:29,587 INFO SenderThread:900 [sender.py:send_request_defer():609] handle sender defer: 10 +2024-05-30 07:05:29,587 INFO SenderThread:900 [file_pusher.py:finish():169] shutting down file pusher +2024-05-30 07:05:29,833 INFO wandb-upload_0:900 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_070447-fi4sos5j/files/wandb-summary.json +2024-05-30 07:05:29,963 DEBUG HandlerThread:900 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-30 07:05:29,963 DEBUG SenderThread:900 [sender.py:send_request():405] send_request: poll_exit +2024-05-30 07:05:30,198 INFO wandb-upload_1:900 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_070447-fi4sos5j/files/output.log +2024-05-30 07:05:30,241 INFO wandb-upload_3:900 [upload_job.py:push():130] Uploaded file 
/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_070447-fi4sos5j/files/requirements.txt +2024-05-30 07:05:30,267 INFO wandb-upload_2:900 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_070447-fi4sos5j/files/config.yaml +2024-05-30 07:05:30,467 INFO Thread-11 (_thread_body):900 [sender.py:transition_state():613] send defer: 11 +2024-05-30 07:05:30,467 DEBUG HandlerThread:900 [handler.py:handle_request():158] handle_request: defer +2024-05-30 07:05:30,467 INFO HandlerThread:900 [handler.py:handle_request_defer():184] handle defer: 11 +2024-05-30 07:05:30,467 DEBUG SenderThread:900 [sender.py:send_request():405] send_request: defer +2024-05-30 07:05:30,467 INFO SenderThread:900 [sender.py:send_request_defer():609] handle sender defer: 11 +2024-05-30 07:05:30,467 INFO SenderThread:900 [file_pusher.py:join():175] waiting for file pusher +2024-05-30 07:05:30,468 INFO SenderThread:900 [sender.py:transition_state():613] send defer: 12 +2024-05-30 07:05:30,468 DEBUG HandlerThread:900 [handler.py:handle_request():158] handle_request: defer +2024-05-30 07:05:30,468 INFO HandlerThread:900 [handler.py:handle_request_defer():184] handle defer: 12 +2024-05-30 07:05:30,468 DEBUG SenderThread:900 [sender.py:send_request():405] send_request: defer +2024-05-30 07:05:30,468 INFO SenderThread:900 [sender.py:send_request_defer():609] handle sender defer: 12 +2024-05-30 07:05:30,468 INFO SenderThread:900 [file_stream.py:finish():601] file stream finish called +2024-05-30 07:05:30,524 INFO SenderThread:900 [file_stream.py:finish():605] file stream finish is done +2024-05-30 07:05:30,524 INFO SenderThread:900 [sender.py:transition_state():613] send defer: 13 +2024-05-30 07:05:30,524 DEBUG HandlerThread:900 [handler.py:handle_request():158] handle_request: defer +2024-05-30 07:05:30,524 INFO HandlerThread:900 [handler.py:handle_request_defer():184] handle defer: 13 +2024-05-30 07:05:30,525 DEBUG SenderThread:900 [sender.py:send_request():405] send_request: defer +2024-05-30 07:05:30,525 INFO SenderThread:900 [sender.py:send_request_defer():609] handle sender defer: 13 +2024-05-30 07:05:30,525 INFO SenderThread:900 [sender.py:transition_state():613] send defer: 14 +2024-05-30 07:05:30,525 DEBUG HandlerThread:900 [handler.py:handle_request():158] handle_request: defer +2024-05-30 07:05:30,525 INFO HandlerThread:900 [handler.py:handle_request_defer():184] handle defer: 14 +2024-05-30 07:05:30,525 DEBUG SenderThread:900 [sender.py:send():378] send: final +2024-05-30 07:05:30,525 DEBUG SenderThread:900 [sender.py:send():378] send: footer +2024-05-30 07:05:30,525 DEBUG SenderThread:900 [sender.py:send_request():405] send_request: defer +2024-05-30 07:05:30,525 INFO SenderThread:900 [sender.py:send_request_defer():609] handle sender defer: 14 +2024-05-30 07:05:30,525 DEBUG HandlerThread:900 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-30 07:05:30,526 DEBUG SenderThread:900 [sender.py:send_request():405] send_request: poll_exit +2024-05-30 07:05:30,526 DEBUG HandlerThread:900 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-30 07:05:30,526 DEBUG HandlerThread:900 [handler.py:handle_request():158] handle_request: server_info +2024-05-30 07:05:30,526 DEBUG HandlerThread:900 [handler.py:handle_request():158] handle_request: get_summary +2024-05-30 07:05:30,526 DEBUG HandlerThread:900 [handler.py:handle_request():158] handle_request: sampled_history +2024-05-30 07:05:30,526 DEBUG 
HandlerThread:900 [handler.py:handle_request():158] handle_request: internal_messages +2024-05-30 07:05:30,527 DEBUG SenderThread:900 [sender.py:send_request():405] send_request: poll_exit +2024-05-30 07:05:30,527 DEBUG SenderThread:900 [sender.py:send_request():405] send_request: server_info +2024-05-30 07:05:30,577 INFO MainThread:900 [wandb_run.py:_footer_history_summary_info():3994] rendering history +2024-05-30 07:05:30,577 INFO MainThread:900 [wandb_run.py:_footer_history_summary_info():4026] rendering summary +2024-05-30 07:05:30,577 INFO MainThread:900 [wandb_run.py:_footer_sync_info():3953] logging synced files +2024-05-30 07:05:30,578 DEBUG HandlerThread:900 [handler.py:handle_request():158] handle_request: shutdown +2024-05-30 07:05:30,578 INFO HandlerThread:900 [handler.py:finish():882] shutting down handler +2024-05-30 07:05:31,527 INFO WriterThread:900 [datastore.py:close():296] close: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_070447-fi4sos5j/run-fi4sos5j.wandb +2024-05-30 07:05:31,577 INFO SenderThread:900 [sender.py:finish():1545] shutting down sender +2024-05-30 07:05:31,577 INFO SenderThread:900 [file_pusher.py:finish():169] shutting down file pusher +2024-05-30 07:05:31,577 INFO SenderThread:900 [file_pusher.py:join():175] waiting for file pusher diff --git a/lm-evaluation-harness/wandb/run-20240530_070447-fi4sos5j/logs/debug.log b/lm-evaluation-harness/wandb/run-20240530_070447-fi4sos5j/logs/debug.log new file mode 100644 index 0000000000000000000000000000000000000000..a202b19b75f84822b1699164f3ae4b3365b2c35d --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240530_070447-fi4sos5j/logs/debug.log @@ -0,0 +1,29 @@ +2024-05-30 07:04:47,379 INFO MainThread:744 [wandb_setup.py:_flush():76] Current SDK version is 0.17.0 +2024-05-30 07:04:47,380 INFO MainThread:744 [wandb_setup.py:_flush():76] Configure stats pid to 744 +2024-05-30 07:04:47,380 INFO MainThread:744 [wandb_setup.py:_flush():76] Loading settings from /root/.config/wandb/settings +2024-05-30 07:04:47,380 INFO MainThread:744 [wandb_setup.py:_flush():76] Loading settings from /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/settings +2024-05-30 07:04:47,380 INFO MainThread:744 [wandb_setup.py:_flush():76] Loading settings from environment variables: {} +2024-05-30 07:04:47,380 INFO MainThread:744 [wandb_setup.py:_flush():76] Applying setup settings: {'_disable_service': False} +2024-05-30 07:04:47,380 WARNING MainThread:744 [wandb_setup.py:_flush():76] Could not find program at -m lm_eval.__main__ +2024-05-30 07:04:47,380 INFO MainThread:744 [wandb_setup.py:_flush():76] Inferring run settings from compute environment: {'program_relpath': None, 'program': '-m lm_eval.__main__'} +2024-05-30 07:04:47,380 INFO MainThread:744 [wandb_setup.py:_flush():76] Applying login settings: {} +2024-05-30 07:04:47,380 INFO MainThread:744 [wandb_init.py:_log_setup():520] Logging user logs to /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_070447-fi4sos5j/logs/debug.log +2024-05-30 07:04:47,380 INFO MainThread:744 [wandb_init.py:_log_setup():521] Logging internal logs to /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_070447-fi4sos5j/logs/debug-internal.log +2024-05-30 07:04:47,380 INFO MainThread:744 [wandb_init.py:init():560] calling init triggers +2024-05-30 07:04:47,380 INFO MainThread:744 [wandb_init.py:init():567] wandb.init called with sweep_config: {} +config: {} +2024-05-30 07:04:47,380 INFO MainThread:744 
[wandb_init.py:init():610] starting backend +2024-05-30 07:04:47,380 INFO MainThread:744 [wandb_init.py:init():614] setting up manager +2024-05-30 07:04:47,384 INFO MainThread:744 [backend.py:_multiprocessing_setup():105] multiprocessing start_methods=fork,spawn,forkserver, using: spawn +2024-05-30 07:04:47,385 INFO MainThread:744 [wandb_init.py:init():622] backend started and connected +2024-05-30 07:04:47,389 INFO MainThread:744 [wandb_init.py:init():711] updated telemetry +2024-05-30 07:04:47,398 INFO MainThread:744 [wandb_init.py:init():744] communicating run to backend with 90.0 second timeout +2024-05-30 07:04:47,695 INFO MainThread:744 [wandb_run.py:_on_init():2396] communicating current version +2024-05-30 07:04:47,809 INFO MainThread:744 [wandb_run.py:_on_init():2405] got version response +2024-05-30 07:04:47,809 INFO MainThread:744 [wandb_init.py:init():795] starting run threads in backend +2024-05-30 07:04:48,086 INFO MainThread:744 [wandb_run.py:_console_start():2374] atexit reg +2024-05-30 07:04:48,086 INFO MainThread:744 [wandb_run.py:_redirect():2229] redirect: wrap_raw +2024-05-30 07:04:48,086 INFO MainThread:744 [wandb_run.py:_redirect():2294] Wrapping output streams. +2024-05-30 07:04:48,086 INFO MainThread:744 [wandb_run.py:_redirect():2319] Redirects installed. +2024-05-30 07:04:48,087 INFO MainThread:744 [wandb_init.py:init():838] run started, returning control to user process +2024-05-30 07:05:31,579 WARNING MsgRouterThr:744 [router.py:message_loop():77] message_loop has been closed diff --git a/lm-evaluation-harness/wandb/run-20240530_070447-fi4sos5j/run-fi4sos5j.wandb b/lm-evaluation-harness/wandb/run-20240530_070447-fi4sos5j/run-fi4sos5j.wandb new file mode 100644 index 0000000000000000000000000000000000000000..861a1def7ac03cfc213cd6642c2ffe0d2ba6a433 Binary files /dev/null and b/lm-evaluation-harness/wandb/run-20240530_070447-fi4sos5j/run-fi4sos5j.wandb differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/gpt_bigcode/__init__.py b/venv/lib/python3.10/site-packages/transformers/models/gpt_bigcode/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..33660eb81e4faebb7938bbba7ba165a2d7079d81 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/gpt_bigcode/__init__.py @@ -0,0 +1,65 @@ +# Copyright 2023 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from typing import TYPE_CHECKING + +from ...utils import ( + OptionalDependencyNotAvailable, + _LazyModule, + is_torch_available, +) + + +_import_structure = { + "configuration_gpt_bigcode": ["GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTBigCodeConfig"], +} + +try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["modeling_gpt_bigcode"] = [ + "GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST", + "GPTBigCodeForSequenceClassification", + "GPTBigCodeForTokenClassification", + "GPTBigCodeForCausalLM", + "GPTBigCodeModel", + "GPTBigCodePreTrainedModel", + ] + +if TYPE_CHECKING: + from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig + + try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .modeling_gpt_bigcode import ( + GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST, + GPTBigCodeForCausalLM, + GPTBigCodeForSequenceClassification, + GPTBigCodeForTokenClassification, + GPTBigCodeModel, + GPTBigCodePreTrainedModel, + ) + + +else: + import sys + + sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) diff --git a/venv/lib/python3.10/site-packages/transformers/models/gpt_bigcode/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/gpt_bigcode/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..33fcd957e00c98e0b36986523961c302af7e3a82 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/gpt_bigcode/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/gpt_bigcode/__pycache__/configuration_gpt_bigcode.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/gpt_bigcode/__pycache__/configuration_gpt_bigcode.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..12f47e0b12078bfa5215052e90fbc73c2fef0b50 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/gpt_bigcode/__pycache__/configuration_gpt_bigcode.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/gpt_bigcode/__pycache__/modeling_gpt_bigcode.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/gpt_bigcode/__pycache__/modeling_gpt_bigcode.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c3284c22b674c17e9e686233c228bd8cb48f095b Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/gpt_bigcode/__pycache__/modeling_gpt_bigcode.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/gpt_bigcode/configuration_gpt_bigcode.py b/venv/lib/python3.10/site-packages/transformers/models/gpt_bigcode/configuration_gpt_bigcode.py new file mode 100644 index 0000000000000000000000000000000000000000..ef5e02ffdc43af1a72f7daaff59f18a3f95b5000 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/gpt_bigcode/configuration_gpt_bigcode.py @@ -0,0 +1,144 @@ +# coding=utf-8 +# Copyright 2023 The BigCode team and HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" GPTBigCode configuration""" + +from ...configuration_utils import PretrainedConfig +from ...utils import logging + + +logger = logging.get_logger(__name__) + + +from ..deprecated._archive_maps import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 + + +class GPTBigCodeConfig(PretrainedConfig): + """ + This is the configuration class to store the configuration of a [`GPTBigCodeModel`]. It is used to instantiate a + GPTBigCode model according to the specified arguments, defining the model architecture. Instantiating a + configuration with the defaults will yield a similar configuration to that of the GPTBigCode + [gpt_bigcode](https://huggingface.co/gpt_bigcode) architecture. + + Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the + documentation from [`PretrainedConfig`] for more information. + + + Args: + vocab_size (`int`, *optional*, defaults to 50257): + Vocabulary size of the GPT-2 model. Defines the number of different tokens that can be represented by the + `inputs_ids` passed when calling [`GPTBigCodeModel`]. + n_positions (`int`, *optional*, defaults to 1024): + The maximum sequence length that this model might ever be used with. Typically set this to something large + just in case (e.g., 512 or 1024 or 2048). + n_embd (`int`, *optional*, defaults to 768): + Dimensionality of the embeddings and hidden states. + n_layer (`int`, *optional*, defaults to 12): + Number of hidden layers in the Transformer encoder. + n_head (`int`, *optional*, defaults to 12): + Number of attention heads for each attention layer in the Transformer encoder. + n_inner (`int`, *optional*, defaults to None): + Dimensionality of the inner feed-forward layers. `None` will set it to 4 times n_embd + activation_function (`str`, *optional*, defaults to `"gelu_pytorch_tanh"`): + Activation function, to be selected in the list `["relu", "silu", "gelu", "tanh", "gelu_new", + "gelu_pytorch_tanh"]`. + resid_pdrop (`float`, *optional*, defaults to 0.1): + The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. + embd_pdrop (`float`, *optional*, defaults to 0.1): + The dropout ratio for the embeddings. + attn_pdrop (`float`, *optional*, defaults to 0.1): + The dropout ratio for the attention. + layer_norm_epsilon (`float`, *optional*, defaults to 1e-5): + The epsilon to use in the layer normalization layers. + initializer_range (`float`, *optional*, defaults to 0.02): + The standard deviation of the truncated_normal_initializer for initializing all weight matrices. + scale_attn_weights (`bool`, *optional*, defaults to `True`): + Scale attention weights by dividing by sqrt(hidden_size).. + use_cache (`bool`, *optional*, defaults to `True`): + Whether or not the model should return the last key/values attentions (not used by all models). + attention_softmax_in_fp32 (`bool`, *optional*, defaults to `True`): + Whether to call the fused softmax in float32. + scale_attention_softmax_in_fp32 (`bool`, *optional*, defaults to `True`): + Whether to scale the attention softmax in float32. 
+ attention_type (`bool`, *optional*, defaults to `True`): + Whether to use Multi-Query Attion (`True`) or Multi-Head Attention (`False`). + Example: + + ```python + >>> from transformers import GPTBigCodeConfig, GPTBigCodeModel + + >>> # Initializing a GPTBigCode configuration + >>> configuration = GPTBigCodeConfig() + + >>> # Initializing a model (with random weights) from the configuration + >>> model = GPTBigCodeModel(configuration) + + >>> # Accessing the model configuration + >>> configuration = model.config + ```""" + + model_type = "gpt_bigcode" + keys_to_ignore_at_inference = ["past_key_values"] + attribute_map = { + "hidden_size": "n_embd", + "max_position_embeddings": "n_positions", + "num_attention_heads": "n_head", + "num_hidden_layers": "n_layer", + } + + def __init__( + self, + vocab_size=50257, + n_positions=1024, + n_embd=768, + n_layer=12, + n_head=12, + n_inner=None, + activation_function="gelu_pytorch_tanh", + resid_pdrop=0.1, + embd_pdrop=0.1, + attn_pdrop=0.1, + layer_norm_epsilon=1e-5, + initializer_range=0.02, + scale_attn_weights=True, + use_cache=True, + bos_token_id=50256, + eos_token_id=50256, + attention_softmax_in_fp32=True, + scale_attention_softmax_in_fp32=True, + multi_query=True, + **kwargs, + ): + self.vocab_size = vocab_size + self.n_positions = n_positions + self.n_embd = n_embd + self.n_layer = n_layer + self.n_head = n_head + self.n_inner = n_inner + self.activation_function = activation_function + self.resid_pdrop = resid_pdrop + self.embd_pdrop = embd_pdrop + self.attn_pdrop = attn_pdrop + self.layer_norm_epsilon = layer_norm_epsilon + self.initializer_range = initializer_range + self.scale_attn_weights = scale_attn_weights + self.use_cache = use_cache + self.attention_softmax_in_fp32 = attention_softmax_in_fp32 + self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32 + self.multi_query = multi_query + + self.bos_token_id = bos_token_id + self.eos_token_id = eos_token_id + + super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs) diff --git a/venv/lib/python3.10/site-packages/transformers/models/gpt_bigcode/modeling_gpt_bigcode.py b/venv/lib/python3.10/site-packages/transformers/models/gpt_bigcode/modeling_gpt_bigcode.py new file mode 100644 index 0000000000000000000000000000000000000000..4e3b8498480c9e76a087b5fb4339a83d1b809710 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/gpt_bigcode/modeling_gpt_bigcode.py @@ -0,0 +1,1504 @@ +# coding=utf-8 +# Copyright 2023 The Bigcode team and HuggingFace Inc. team. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
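Before the modeling code begins, a short hedged illustration of the `GPTBigCodeConfig` class defined above: the values are arbitrary, the snippet assumes only a standard `transformers` installation, and it is a sketch rather than part of the library file.

```python
from transformers import GPTBigCodeConfig

# Arbitrary small values for illustration; other fields keep the documented defaults.
config = GPTBigCodeConfig(n_embd=256, n_head=8, n_layer=4, multi_query=True)

# attribute_map aliases the GPT-2 style field names onto the framework-wide ones:
assert config.hidden_size == config.n_embd == 256
assert config.num_attention_heads == config.n_head == 8
assert config.num_hidden_layers == config.n_layer == 4

print(config.multi_query)  # True -> attention layers use a single shared key/value head
```

The `attribute_map` indirection is what lets the GPT-2 style names (`n_embd`, `n_head`, `n_layer`) coexist with the common `hidden_size` / `num_attention_heads` / `num_hidden_layers` accessors used elsewhere in the library.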
+"""PyTorch GPTBigCode model.""" +import math +from typing import List, Optional, Tuple, Union + +import torch +import torch.nn.functional as F +import torch.utils.checkpoint +from torch import nn +from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss + +from ...activations import ACT2FN +from ...modeling_attn_mask_utils import AttentionMaskConverter +from ...modeling_outputs import ( + BaseModelOutputWithPastAndCrossAttentions, + CausalLMOutputWithCrossAttentions, + SequenceClassifierOutputWithPast, + TokenClassifierOutput, +) +from ...modeling_utils import PreTrainedModel +from ...pytorch_utils import is_torch_greater_or_equal_than_2_2 +from ...utils import ( + add_code_sample_docstrings, + add_start_docstrings, + add_start_docstrings_to_model_forward, + is_flash_attn_2_available, + is_flash_attn_greater_or_equal_2_10, + logging, +) +from .configuration_gpt_bigcode import GPTBigCodeConfig + + +if is_flash_attn_2_available(): + from flash_attn import flash_attn_func, flash_attn_varlen_func + from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input # noqa + + +logger = logging.get_logger(__name__) + +_CHECKPOINT_FOR_DOC = "bigcode/gpt_bigcode-santacoder" +_CONFIG_FOR_DOC = "GPTBigCodeConfig" + + +from ..deprecated._archive_maps import GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 + + +# Fused kernels +# Use separate functions for each case because conditionals prevent kernel fusion. +# TODO: Could have better fused kernels depending on scaling, dropout and head mask. +# Is it doable without writing 32 functions? +@torch.jit.script +def upcast_masked_softmax( + x: torch.Tensor, mask: torch.Tensor, mask_value: torch.Tensor, scale: float, softmax_dtype: torch.dtype +): + input_dtype = x.dtype + x = x.to(softmax_dtype) * scale + x = torch.where(mask, x, mask_value) + x = torch.nn.functional.softmax(x, dim=-1).to(input_dtype) + return x + + +@torch.jit.script +def upcast_softmax(x: torch.Tensor, scale: float, softmax_dtype: torch.dtype): + input_dtype = x.dtype + x = x.to(softmax_dtype) * scale + x = torch.nn.functional.softmax(x, dim=-1).to(input_dtype) + return x + + +@torch.jit.script +def masked_softmax(x: torch.Tensor, mask: torch.Tensor, mask_value: torch.Tensor): + x = torch.where(mask, x, mask_value) + x = torch.nn.functional.softmax(x, dim=-1) + return x + + +# Copied from transformers.models.llama.modeling_llama._get_unpad_data +def _get_unpad_data(attention_mask): + seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32) + indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten() + max_seqlen_in_batch = seqlens_in_batch.max().item() + cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0)) + return ( + indices, + cu_seqlens, + max_seqlen_in_batch, + ) + + +class GPTBigCodeAttention(nn.Module): + def __init__(self, config, is_cross_attention=False, layer_idx=None): + super().__init__() + self.config = config + + self.mask_value = None + self.multi_query = config.multi_query + self.embed_dim = config.hidden_size + self.num_heads = config.num_attention_heads + self.head_dim = self.embed_dim // self.num_heads + self.kv_heads = 1 if self.multi_query else self.num_heads + self.kv_dim = self.kv_heads * self.head_dim + self.split_size = self.embed_dim + self.is_causal = True + + if self.head_dim * self.num_heads != self.embed_dim: + raise ValueError( + f"`embed_dim` must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:" + f" {self.num_heads})." 
+ ) + + self.scale_attn_weights = config.scale_attn_weights + self.is_cross_attention = is_cross_attention + + self.layer_idx = layer_idx + self.attention_softmax_in_fp32 = config.attention_softmax_in_fp32 + self.scale_attention_softmax_in_fp32 = ( + config.scale_attention_softmax_in_fp32 and config.attention_softmax_in_fp32 + ) + self.attn_pdrop = config.attn_pdrop + + if self.is_cross_attention: + if self.multi_query: + raise NotImplementedError("Multi-Query Attention not supported for cross_attention") + + self.c_attn = nn.Linear(self.embed_dim, 2 * self.embed_dim) + self.q_attn = nn.Linear(self.embed_dim, self.embed_dim) + else: + self.c_attn = nn.Linear(self.embed_dim, self.embed_dim + 2 * self.kv_dim) + + self.c_proj = nn.Linear(self.embed_dim, self.embed_dim) + + self.attn_dropout = nn.Dropout(config.attn_pdrop) + self.resid_dropout = nn.Dropout(config.resid_pdrop) + + def _get_mask_value(self, device, dtype): + # torch.where expects a tensor. We use a cache to avoid recreating it every time. + if self.mask_value is None or self.mask_value.dtype != dtype or self.mask_value.device != device: + self.mask_value = torch.full([], torch.finfo(dtype).min, dtype=dtype, device=device) + return self.mask_value + + def _attn(self, query, key, value, attention_mask=None, head_mask=None): + dtype = query.dtype + softmax_dtype = torch.float32 if self.attention_softmax_in_fp32 else dtype + upcast = dtype != softmax_dtype + + unscale = self.layer_idx + 1 if self.scale_attention_softmax_in_fp32 and upcast else 1 + scale_factor = unscale**-1 + if self.scale_attn_weights: + scale_factor /= self.head_dim**0.5 + + # MQA models: (batch_size, query_length, num_heads * head_dim) + # MHA models: (batch_size, num_heads, query_length, head_dim) + query_shape = query.shape + batch_size = query_shape[0] + key_length = key.size(-1) + if self.multi_query: + # (batch_size, query_length, num_heads, head_dim) x (batch_size, head_dim, key_length) + # -> (batch_size, query_length, num_heads, key_length) + query_length = query_shape[1] + attn_shape = (batch_size, query_length, self.num_heads, key_length) + attn_view = (batch_size, query_length * self.num_heads, key_length) + # No copy needed for MQA 2, or when layer_past is provided. + query = query.reshape(batch_size, query_length * self.num_heads, self.head_dim) + else: + # (batch_size, num_heads, query_length, head_dim) x (batch_size, num_heads, head_dim, key_length) + # -> (batch_size, num_heads, query_length, key_length) + query_length = query_shape[2] + attn_shape = (batch_size, self.num_heads, query_length, key_length) + attn_view = (batch_size * self.num_heads, query_length, key_length) + # Always copies + query = query.reshape(batch_size * self.num_heads, query_length, self.head_dim) + # No copy when layer_past is provided. + key = key.reshape(batch_size * self.num_heads, self.head_dim, key_length) + + attn_weights = torch.empty(attn_view, device=query.device, dtype=query.dtype) + if query.device.type == "cpu": + # This is needed because of a bug in pytorch https://github.com/pytorch/pytorch/issues/80588. + # The bug was fixed in https://github.com/pytorch/pytorch/pull/96086, + # but the fix has not been released as of pytorch version 2.0.0. + attn_weights = torch.zeros_like(attn_weights) + beta = 1 + else: + beta = 0 + attn_weights = torch.baddbmm(attn_weights, query, key, beta=beta, alpha=scale_factor).view(attn_shape) + + if upcast: + # Use a fused kernel to prevent a large overhead from casting and scaling. 
+ # Sub-optimal when the key length is not a multiple of 8. + if attention_mask is None: + attn_weights = upcast_softmax(attn_weights, unscale, softmax_dtype) + else: + mask_value = self._get_mask_value(attn_weights.device, softmax_dtype) + attn_weights = upcast_masked_softmax(attn_weights, attention_mask, mask_value, unscale, softmax_dtype) + else: + if attention_mask is not None: + mask_value = self._get_mask_value(attn_weights.device, softmax_dtype) + + # The fused kernel is very slow when the key length is not a multiple of 8, so we skip fusion. + attn_weights = torch.where(attention_mask, attn_weights, mask_value) + + attn_weights = torch.nn.functional.softmax(attn_weights, dim=-1) + + attn_weights = self.attn_dropout(attn_weights) + + # Mask heads if we want to + if head_mask is not None: + if self.multi_query: + head_mask = head_mask.transpose(1, 2) + attn_weights = attn_weights * head_mask + + if self.multi_query: + attn_output = torch.bmm(attn_weights.view(attn_view), value).view(query_shape) + else: + attn_output = torch.matmul(attn_weights, value) + + return attn_output, attn_weights + + def forward( + self, + hidden_states: torch.Tensor, + layer_past: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + head_mask: Optional[torch.Tensor] = None, + encoder_hidden_states: Optional[torch.Tensor] = None, + encoder_attention_mask: Optional[torch.Tensor] = None, + use_cache: Optional[bool] = False, + output_attentions: Optional[bool] = False, + ) -> Union[ + Tuple[torch.Tensor, Optional[torch.Tensor]], + Tuple[torch.Tensor, Optional[torch.Tensor], Tuple[torch.Tensor, ...]], + ]: + if encoder_hidden_states is not None: + if not hasattr(self, "q_attn") or not self.is_cross_attention: + raise ValueError( + "If class is used as cross attention, the weights `q_attn` have to be defined. " + "Please make sure to instantiate class with `GPTBigCodeAttention(..., is_cross_attention=True)`." + ) + + query = self.q_attn(hidden_states) + key_value = self.c_attn(encoder_hidden_states) + attention_mask = encoder_attention_mask + elif self.multi_query: + query, key_value = self.c_attn(hidden_states).split((self.embed_dim, 2 * self.kv_dim), dim=2) + else: + # Note: We split as (self.num_heads, 3, self.head_dim) instead of (3, self.num_heads, self.head_dim), + # i.e., the memory layout is not the same as GPT2. + # This makes the concatenation with past_key_value more efficient. + query, key_value = ( + self.c_attn(hidden_states) + .view(*hidden_states.shape[:2], self.num_heads, 3 * self.head_dim) + .transpose(1, 2) + .split((self.head_dim, 2 * self.head_dim), dim=3) + ) + + if layer_past is not None: + key_value = torch.cat((layer_past, key_value), dim=-2) + present = key_value if use_cache else None + + key, value = key_value.split((self.head_dim, self.head_dim), dim=-1) + + attn_output, attn_weights = self._attn(query, key.transpose(-1, -2), value, attention_mask, head_mask) + + if not self.multi_query: + attn_output = attn_output.transpose(1, 2).reshape(hidden_states.shape) + attn_output = self.c_proj(attn_output) + attn_output = self.resid_dropout(attn_output) + + outputs = (attn_output, present) + if output_attentions: + if self.multi_query: + # Transpose to return weights in the usual format (batch_size, num_heads, query_length, key_length) + attn_weights = attn_weights.transpose(1, 2) + outputs += (attn_weights,) + + return outputs # a, present, (attentions) + + +class GPTBigCodeFlashAttention2(GPTBigCodeAttention): + """ + GPTBigCode flash attention module. 
This module inherits from `GPTBigCodeAttention` as the weights of the module + stay untouched. The only required change would be on the forward pass where it needs to correctly call the public + API of flash attention and deal with padding tokens in case the input contains any of them. + """ + + # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2.__init__ + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1. + # flash_attn<2.1 generates top-left aligned causal mask, while what is needed here is bottom-right alignment, which was made the default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0. + # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left). + self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10() + + def forward( + self, + hidden_states: torch.Tensor, + layer_past: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + head_mask: Optional[torch.Tensor] = None, + encoder_hidden_states: Optional[torch.Tensor] = None, + encoder_attention_mask: Optional[torch.Tensor] = None, + use_cache: Optional[bool] = False, + output_attentions: Optional[bool] = False, + ) -> Union[ + Tuple[torch.Tensor, Optional[torch.Tensor]], + Tuple[torch.Tensor, Optional[torch.Tensor], Tuple[torch.Tensor, ...]], + ]: + if encoder_hidden_states is not None: + if not hasattr(self, "q_attn") or not self.is_cross_attention: + raise ValueError( + "If class is used as cross attention, the weights `q_attn` have to be defined. " + "Please make sure to instantiate class with `GPTBigCodeAttention(..., is_cross_attention=True)`." + ) + + query = self.q_attn(hidden_states) + key_value = self.c_attn(encoder_hidden_states) + attention_mask = encoder_attention_mask + elif self.multi_query: + query, key_value = self.c_attn(hidden_states).split((self.embed_dim, 2 * self.kv_dim), dim=2) + else: + # Note: We split as (self.num_heads, 3, self.head_dim) instead of (3, self.num_heads, self.head_dim), + # i.e., the memory layout is not the same as GPT2. + # This makes the concatenation with past_key_value more efficient.
+ query, key_value = ( + self.c_attn(hidden_states) + .view(*hidden_states.shape[:2], self.num_heads, 3 * self.head_dim) + .transpose(1, 2) + .split((self.head_dim, 2 * self.head_dim), dim=3) + ) + + if layer_past is not None: + key_value = torch.cat((layer_past, key_value), dim=-2) + present = key_value if use_cache else None + + key, value = key_value.split((self.head_dim, self.head_dim), dim=-1) + + # Flash attention requires the input to have the shape + # batch_size x seq_length x head_dim x hidden_dim + if self.multi_query: + batch_size, query_length, _ = query.shape + query = query.reshape(batch_size, query_length, self.num_heads, self.head_dim) + key = key.unsqueeze(2) + value = value.unsqueeze(2) + else: + query_length = query.shape[2] + batch_size, _, tgt, _ = key.shape + query = query.transpose(1, 2).reshape(batch_size, query_length, self.num_heads, self.head_dim) + key = key.transpose(1, 2).reshape(batch_size, tgt, self.num_heads, self.head_dim) + value = value.transpose(1, 2).reshape(batch_size, tgt, self.num_heads, self.head_dim) + + attn_dropout = self.attn_pdrop if self.training else 0.0 + + # In PEFT, usually we cast the layer norms in float32 for training stability reasons + # therefore the input hidden states gets silently casted in float32. Hence, we need + # cast them back in float16 just to be sure everything works as expected. + input_dtype = query.dtype + if input_dtype == torch.float32: + if torch.is_autocast_enabled(): + target_dtype = torch.get_autocast_gpu_dtype() + # Handle the case where the model is quantized + elif hasattr(self.config, "_pre_quantization_dtype"): + target_dtype = self.config._pre_quantization_dtype + else: + target_dtype = self.c_attn.weight.dtype + + logger.warning_once( + f"The input hidden states seems to be silently casted in float32, this might be related to" + f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in" + f" {target_dtype}." + ) + query = query.to(target_dtype) + key = key.to(target_dtype) + value = value.to(target_dtype) + + attn_output = self._flash_attention_forward( + query, key, value, attention_mask, query_length, dropout=attn_dropout + ) + + attn_weights_reshaped = attn_output.reshape(batch_size, query_length, self.num_heads * self.head_dim) + attn_output = self.c_proj(attn_weights_reshaped) + attn_output = self.resid_dropout(attn_output) + + outputs = (attn_output, present) + + if output_attentions: + if self.multi_query: + # Transpose to return weights in the usual format (batch_size, num_heads, query_length, key_length) + attn_weights_reshaped = attn_weights_reshaped.transpose(1, 2) + else: + attn_weights_reshaped = None + + outputs += (attn_weights_reshaped,) + + return outputs # a, present, (attentions) + + # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2._flash_attention_forward + def _flash_attention_forward( + self, query_states, key_states, value_states, attention_mask, query_length, dropout=0.0, softmax_scale=None + ): + """ + Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token + first unpad the input, then computes the attention scores and pad the final attention scores. 
+ + Args: + query_states (`torch.Tensor`): + Input query states to be passed to Flash Attention API + key_states (`torch.Tensor`): + Input key states to be passed to Flash Attention API + value_states (`torch.Tensor`): + Input value states to be passed to Flash Attention API + attention_mask (`torch.Tensor`): + The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the + position of padding tokens and 1 for the position of non-padding tokens. + dropout (`float`): + Attention dropout + softmax_scale (`float`, *optional*): + The scaling of QK^T before applying softmax. Default to 1 / sqrt(head_dim) + """ + if not self._flash_attn_uses_top_left_mask: + causal = self.is_causal + else: + # TODO: Remove the `query_length != 1` check once Flash Attention for RoCm is bumped to 2.1. For details, please see the comment in LlamaFlashAttention2 __init__. + causal = self.is_causal and query_length != 1 + + # Contains at least one padding token in the sequence + if attention_mask is not None: + batch_size = query_states.shape[0] + query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input( + query_states, key_states, value_states, attention_mask, query_length + ) + + cu_seqlens_q, cu_seqlens_k = cu_seq_lens + max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens + + attn_output_unpad = flash_attn_varlen_func( + query_states, + key_states, + value_states, + cu_seqlens_q=cu_seqlens_q, + cu_seqlens_k=cu_seqlens_k, + max_seqlen_q=max_seqlen_in_batch_q, + max_seqlen_k=max_seqlen_in_batch_k, + dropout_p=dropout, + softmax_scale=softmax_scale, + causal=causal, + ) + + attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length) + else: + attn_output = flash_attn_func( + query_states, key_states, value_states, dropout, softmax_scale=softmax_scale, causal=causal + ) + + return attn_output + + # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2._upad_input + def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length): + indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask) + batch_size, kv_seq_len, num_key_value_heads, head_dim = key_layer.shape + + key_layer = index_first_axis( + key_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k + ) + value_layer = index_first_axis( + value_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k + ) + if query_length == kv_seq_len: + query_layer = index_first_axis( + query_layer.reshape(batch_size * kv_seq_len, self.num_heads, head_dim), indices_k + ) + cu_seqlens_q = cu_seqlens_k + max_seqlen_in_batch_q = max_seqlen_in_batch_k + indices_q = indices_k + elif query_length == 1: + max_seqlen_in_batch_q = 1 + cu_seqlens_q = torch.arange( + batch_size + 1, dtype=torch.int32, device=query_layer.device + ) # There is a memcpy here, that is very bad. + indices_q = cu_seqlens_q[:-1] + query_layer = query_layer.squeeze(1) + else: + # The -q_len: slice assumes left padding. 
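+            # During cached generation only the last `query_length` positions carry new queries,
+            # so the 2D padding mask is cropped to its last `query_length` columns. That cropping
+            # only lines up with the query tokens when sequences are left-padded (real tokens at
+            # the end of each row); with right padding the selected columns could point at
+            # padding instead.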
+ attention_mask = attention_mask[:, -query_length:] + query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask) + + return ( + query_layer, + key_layer, + value_layer, + indices_q, + (cu_seqlens_q, cu_seqlens_k), + (max_seqlen_in_batch_q, max_seqlen_in_batch_k), + ) + + +class GPTBigCodeSdpaAttention(GPTBigCodeAttention): + def _attn(self, query, key, value, attention_mask=None, head_mask=None): + if head_mask is not None: + # The super dispatch is done in the forward. + raise ValueError( + "PyTorch SDPA does not support head_mask. Please open an issue in Transformers repository." + ) + + scale = None + if not self.scale_attn_weights: + scale = 1 + + # MQA models: (batch_size, query_length, num_heads * head_dim) + # MHA models: (batch_size, num_heads, query_length, head_dim) + query_shape = query.shape + batch_size = query_shape[0] + key.shape[-2] + + if self.multi_query: + query_length = query_shape[1] + + # SDPA requires the dimension [..., sequence_length, head_dim]. + query = query.view(batch_size, query_length, self.num_heads, self.head_dim).transpose(1, 2) + + # Without these unsqueeze, SDPA complains as the query and key/value have a different number of dimensions. + key = key.unsqueeze(1) + value = value.unsqueeze(1) + + # Although these expand are not numerically useful, PyTorch can not dispatch to memory-efficient backend + # and flash attention backend (No available kernel. Aborting execution.) from the shapes + # query = [batch_size, num_heads, query_length, head_dim] + # key = [batch_size, 1, past_length, head_dim] + # value = [batch_size, 1, past_length, head_dim] + # + # torch==2.1.2 is bugged with non-contiguous inputs with custom attn_mask (https://github.com/pytorch/pytorch/issues/112577), hence the check. + if is_torch_greater_or_equal_than_2_2: + key = key.expand(-1, self.num_heads, -1, -1) + value = value.expand(-1, self.num_heads, -1, -1) + else: + query_length = query_shape[-1] + + # See the comment above. + if query.device.type == "cuda" and attention_mask is not None: + query = query.contiguous() + key = key.contiguous() + value = value.contiguous() + + sdpa_result = torch.nn.functional.scaled_dot_product_attention( + query, + key, + value, + attn_mask=attention_mask, + dropout_p=self.attn_pdrop if self.training else 0.0, + # The query_length > 1 is necessary to match with AttentionMaskConverter.to_causal_4d that does not create a causal mask in case query_length == 1. 
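+            # When query_length == 1 (single-token decoding with a KV cache), the lone query may
+            # attend to every cached position anyway, so dropping is_causal there is numerically
+            # equivalent and keeps this path consistent with the 4D-mask code path above.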
+ is_causal=self.is_causal and attention_mask is None and query_length > 1, + scale=scale, + ) + + if self.multi_query: + # (batch_size, num_heads, seq_len, head_dim) --> (batch_size, seq_len, num_heads, head_dim) + sdpa_result = sdpa_result.transpose(1, 2) + + # Reshape is kind of expensive here, as it does a memory copy, + # but I did not manage to make away without it (logits do not match when using view) + # (batch_size, seq_len, num_heads, head_dim) --> (batch_size, seq_len, num_heads * head_dim) + sdpa_result = sdpa_result.reshape(query_shape) + + return sdpa_result, None + + def forward( + self, + hidden_states: torch.Tensor, + layer_past: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + head_mask: Optional[torch.Tensor] = None, + encoder_hidden_states: Optional[torch.Tensor] = None, + encoder_attention_mask: Optional[torch.Tensor] = None, + use_cache: Optional[bool] = False, + output_attentions: Optional[bool] = False, + ) -> Union[ + Tuple[torch.Tensor, Optional[torch.Tensor]], + Tuple[torch.Tensor, Optional[torch.Tensor], Tuple[torch.Tensor, ...]], + ]: + if encoder_hidden_states is not None: + if not hasattr(self, "q_attn") or not self.is_cross_attention: + raise ValueError( + "If class is used as cross attention, the weights `q_attn` have to be defined. " + "Please make sure to instantiate class with `GPTBigCodeAttention(..., is_cross_attention=True)`." + ) + + query = self.q_attn(hidden_states) + key_value = self.c_attn(encoder_hidden_states) + attention_mask = encoder_attention_mask + elif self.multi_query: + query, key_value = self.c_attn(hidden_states).split((self.embed_dim, 2 * self.kv_dim), dim=2) + else: + # Note: We split as (self.num_heads, 3, self.head_dim) instead of (3, self.num_heads, self.head_dim), + # i.e., the memory layout is not the same as GPT2. + # This makes the concatenation with past_key_value more efficient. + query, key_value = ( + self.c_attn(hidden_states) + .view(*hidden_states.shape[:2], self.num_heads, 3 * self.head_dim) + .transpose(1, 2) + .split((self.head_dim, 2 * self.head_dim), dim=3) + ) + + if layer_past is not None: + key_value = torch.cat((layer_past, key_value), dim=-2) + present = key_value if use_cache else None + + key, value = key_value.split((self.head_dim, self.head_dim), dim=-1) + + if not output_attentions and head_mask is None: + # Difference with the original implementation: there is no need to transpose the key here, + # as SDPA expects seq_length to be at index -2 for the key as well + attn_output, attn_weights = self._attn(query, key, value, attention_mask, head_mask) + else: + # TODO: Improve this warning with e.g. `model.config._attn_implementation = "manual"` once this is implemented. + logger.warning_once( + "GPTBigCodeModel is using GPTBigCodeSdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True` and `head_mask` not None." + ' Falling back to the manual attention implementation, but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.' 
+ ) + attn_output, attn_weights = super()._attn(query, key.transpose(-1, -2), value, attention_mask, head_mask) + + if not self.multi_query: + attn_output = attn_output.transpose(1, 2).reshape(hidden_states.shape) + attn_output = self.c_proj(attn_output) + attn_output = self.resid_dropout(attn_output) + + outputs = (attn_output, present) + if output_attentions: + if self.multi_query: + # Transpose to return weights in the usual format (batch_size, num_heads, query_length, key_length) + attn_weights = attn_weights.transpose(1, 2) + outputs += (attn_weights,) + + return outputs + + +class GPTBigCodeMLP(nn.Module): + def __init__(self, intermediate_size, config): + super().__init__() + embed_dim = config.hidden_size + self.c_fc = nn.Linear(embed_dim, intermediate_size) + self.c_proj = nn.Linear(intermediate_size, embed_dim) + self.act = ACT2FN[config.activation_function] + self.dropout = nn.Dropout(config.resid_pdrop) + + # Copied from transformers.models.gpt2.modeling_gpt2.GPT2MLP.forward + def forward(self, hidden_states: Optional[Tuple[torch.FloatTensor]]) -> torch.FloatTensor: + hidden_states = self.c_fc(hidden_states) + hidden_states = self.act(hidden_states) + hidden_states = self.c_proj(hidden_states) + hidden_states = self.dropout(hidden_states) + return hidden_states + + +GPTBIGCODE_ATTENTION_CLASSES = { + "eager": GPTBigCodeAttention, + "flash_attention_2": GPTBigCodeFlashAttention2, + "sdpa": GPTBigCodeSdpaAttention, +} + + +class GPTBigCodeBlock(nn.Module): + def __init__(self, config, layer_idx=None): + super().__init__() + hidden_size = config.hidden_size + self.inner_dim = config.n_inner if config.n_inner is not None else 4 * hidden_size + + self.ln_1 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon) + + self.attn = GPTBIGCODE_ATTENTION_CLASSES[config._attn_implementation](config, layer_idx=layer_idx) + + self.ln_2 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon) + + if config.add_cross_attention: + if config.multi_query: + raise NotImplementedError("Cross-attention not implemented for MQA") + + self.crossattention = GPTBIGCODE_ATTENTION_CLASSES[config._attn_implementation]( + config, is_cross_attention=True, layer_idx=layer_idx + ) + + self.ln_cross_attn = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon) + + self.mlp = GPTBigCodeMLP(self.inner_dim, config) + + def forward( + self, + hidden_states: Optional[Tuple[torch.Tensor]], + layer_past: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + head_mask: Optional[torch.Tensor] = None, + encoder_hidden_states: Optional[torch.Tensor] = None, + encoder_attention_mask: Optional[torch.Tensor] = None, + use_cache: Optional[bool] = False, + output_attentions: Optional[bool] = False, + ) -> Union[ + Tuple[torch.Tensor], Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor, torch.Tensor, torch.Tensor] + ]: + residual = hidden_states + hidden_states = self.ln_1(hidden_states) + attn_outputs = self.attn( + hidden_states, + layer_past=layer_past, + attention_mask=attention_mask, + head_mask=head_mask, + use_cache=use_cache, + output_attentions=output_attentions, + ) + attn_output = attn_outputs[0] # output_attn: a, present, (attentions) + outputs = attn_outputs[1:] + # residual connection + hidden_states = attn_output + residual + + if encoder_hidden_states is not None: + # add one self-attention block for cross-attention + if not hasattr(self, "crossattention"): + raise ValueError( + f"If `encoder_hidden_states` are passed, {self} has to be instantiated with " + "cross-attention 
layers by setting `config.add_cross_attention=True`" + ) + residual = hidden_states + hidden_states = self.ln_cross_attn(hidden_states) + cross_attn_outputs = self.crossattention( + hidden_states, + attention_mask=attention_mask, + head_mask=head_mask, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_attention_mask, + output_attentions=output_attentions, + ) + attn_output = cross_attn_outputs[0] + # residual connection + hidden_states = residual + attn_output + outputs = outputs + cross_attn_outputs[2:] # add cross attentions if we output attention weights + + residual = hidden_states + hidden_states = self.ln_2(hidden_states) + feed_forward_hidden_states = self.mlp(hidden_states) + # residual connection + hidden_states = residual + feed_forward_hidden_states + + if use_cache: + outputs = (hidden_states,) + outputs + else: + outputs = (hidden_states,) + outputs[1:] + + return outputs # hidden_states, present, (attentions, cross_attentions) + + +class GPTBigCodePreTrainedModel(PreTrainedModel): + """ + An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained + models. + """ + + config_class = GPTBigCodeConfig + base_model_prefix = "transformer" + supports_gradient_checkpointing = True + _no_split_modules = ["GPTBigCodeBlock"] + _skip_keys_device_placement = "past_key_values" + _supports_flash_attn_2 = True + _supports_sdpa = True + + def __init__(self, *inputs, **kwargs): + super().__init__(*inputs, **kwargs) + + def _init_weights(self, module): + """Initialize the weights.""" + if isinstance(module, (GPTBigCodeMLP, GPTBigCodeAttention)): + # Reinitialize selected weights subject to the OpenAI GPT-2 Paper Scheme: + # > A modified initialization which accounts for the accumulation on the residual path with model depth. Scale + # > the weights of residual layers at initialization by a factor of 1/√N where N is the # of residual layers. + # > -- GPT-2 :: https://openai.com/blog/better-language-models/ + # + # Reference (Megatron-LM): https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/gpt_model.py + module.c_proj.weight.data.normal_( + mean=0.0, std=(self.config.initializer_range / math.sqrt(2 * self.config.n_layer)) + ) + module.c_proj._is_hf_initialized = True + elif isinstance(module, nn.Linear): + # Slightly different from the TF version which uses truncated_normal for initialization + # cf https://github.com/pytorch/pytorch/pull/5617 + module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) + if module.bias is not None: + module.bias.data.zero_() + elif isinstance(module, nn.Embedding): + module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) + if module.padding_idx is not None: + module.weight.data[module.padding_idx].zero_() + elif isinstance(module, nn.LayerNorm): + module.bias.data.zero_() + module.weight.data.fill_(1.0) + + +GPT_BIGCODE_START_DOCSTRING = r""" + + This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the + library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads + etc.) + + This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. + Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage + and behavior. + + Parameters: + config ([`GPTBigCodeConfig`]): Model configuration class with all the parameters of the model. 
+ Initializing with a config file does not load the weights associated with the model, only the + configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. +""" + +GPT_BIGCODE_INPUTS_DOCSTRING = r""" + Args: + input_ids (`torch.Tensor` of shape `(batch_size, input_ids_length)`): + `input_ids_length` = `sequence_length` if `past_key_values` is `None` else + `past_key_values[0][0].shape[-2]` (`sequence_length` of input past key value states). Indices of input + sequence tokens in the vocabulary. + + If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as + `input_ids`. + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input-ids) + past_key_values (`Tuple[torch.Tensor]` of length `config.n_layers`): + Contains precomputed hidden-states (key and values in the attention blocks) as computed by the model (see + `past_key_values` output below). Can be used to speed up sequential decoding. The `input_ids` which have + their past given to this model should not be passed as `input_ids` as they have already been computed. + attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + If `past_key_values` is used, `attention_mask` needs to contain the masking strategy that was used for + `past_key_values`. In other words, the `attention_mask` always has to have the length: + `len(past_key_values) + len(input_ids)` + + [What are attention masks?](../glossary#attention-mask) + token_type_ids (`torch.Tensor` of shape `(batch_size, input_ids_length)`, *optional*): + Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, + 1]`: + + - 0 corresponds to a *sentence A* token, + - 1 corresponds to a *sentence B* token. + + [What are token type IDs?](../glossary#token-type-ids) + position_ids (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, + config.max_position_embeddings - 1]`. + + [What are position IDs?](../glossary#position-ids) + head_mask (`torch.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): + Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + + inputs_embeds (`torch.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This + is useful if you want more control over how to convert `input_ids` indices into associated vectors than the + model's internal embedding lookup matrix. + + If `past_key_values` is used, optionally only the last `inputs_embeds` have to be input (see + `past_key_values`). + use_cache (`bool`, *optional*): + If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see + `past_key_values`). + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. 
+ output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. +""" + + +@add_start_docstrings( + "The bare GPT_BIGCODE Model transformer outputting raw hidden-states without any specific head on top.", + GPT_BIGCODE_START_DOCSTRING, +) +class GPTBigCodeModel(GPTBigCodePreTrainedModel): + def __init__(self, config): + super().__init__(config) + self.multi_query = config.multi_query + self.embed_dim = config.hidden_size + + self.wte = nn.Embedding(config.vocab_size, self.embed_dim) + self.wpe = nn.Embedding(config.max_position_embeddings, self.embed_dim) + + self.drop = nn.Dropout(config.embd_pdrop) + self.h = nn.ModuleList([GPTBigCodeBlock(config, layer_idx=i) for i in range(config.num_hidden_layers)]) + self.ln_f = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon) + + max_positions = config.max_position_embeddings + self.register_buffer( + "bias", torch.tril(torch.ones((max_positions, max_positions), dtype=torch.bool)), persistent=False + ) + + self.gradient_checkpointing = False + + self._use_sdpa = config._attn_implementation == "sdpa" + self._use_flash_attention_2 = config._attn_implementation == "flash_attention_2" + + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.wte + + def set_input_embeddings(self, new_embeddings): + self.wte = new_embeddings + + @add_start_docstrings_to_model_forward(GPT_BIGCODE_INPUTS_DOCSTRING) + @add_code_sample_docstrings( + checkpoint=_CHECKPOINT_FOR_DOC, + output_type=BaseModelOutputWithPastAndCrossAttentions, + config_class=_CONFIG_FOR_DOC, + ) + def forward( + self, + input_ids: Optional[torch.Tensor] = None, + past_key_values: Optional[List[torch.Tensor]] = None, + attention_mask: Optional[torch.Tensor] = None, + token_type_ids: Optional[torch.Tensor] = None, + position_ids: Optional[torch.Tensor] = None, + head_mask: Optional[torch.Tensor] = None, + inputs_embeds: Optional[torch.Tensor] = None, + encoder_hidden_states: Optional[torch.Tensor] = None, + encoder_attention_mask: Optional[torch.Tensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, BaseModelOutputWithPastAndCrossAttentions]: + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + use_cache = use_cache if use_cache is not None else self.config.use_cache + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if input_ids is not None and inputs_embeds is not None: + raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") + elif input_ids is not None: + self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) + input_shape = input_ids.size() + input_ids = input_ids.view(-1, input_shape[-1]) + batch_size = input_ids.shape[0] + elif inputs_embeds is not None: + input_shape = inputs_embeds.size()[:-1] + batch_size = inputs_embeds.shape[0] + else: + raise ValueError("You have to specify either input_ids or inputs_embeds") + + if batch_size <= 0: + raise ValueError("batch_size has to be defined and > 0") 
+ + device = input_ids.device if input_ids is not None else inputs_embeds.device + + if token_type_ids is not None: + token_type_ids = token_type_ids.view(-1, input_shape[-1]) + + if past_key_values is None: + past_length = 0 + past_key_values = tuple([None] * len(self.h)) + else: + past_length = past_key_values[0].size(-2) + + if attention_mask is not None and len(attention_mask.shape) == 2 and position_ids is None: + # create position_ids on the fly for batch generation + position_ids = attention_mask.long().cumsum(-1) - 1 + position_ids.masked_fill_(attention_mask == 0, 1) + if past_length > 0: + position_ids = position_ids[:, past_length : input_shape[-1] + past_length :] + elif position_ids is None: + position_ids = torch.arange(past_length, input_shape[-1] + past_length, dtype=torch.long, device=device) + position_ids = position_ids.unsqueeze(0) + + # Self-attention mask. + query_length = input_shape[-1] + key_length = past_length + query_length + self_attention_mask = self.bias[None, key_length - query_length : key_length, :key_length] + + if self._use_flash_attention_2: + # 2d mask is passed through the layers + attention_mask = attention_mask.bool() if (attention_mask is not None and 0 in attention_mask) else None + encoder_attention_mask = ( + encoder_attention_mask.bool() + if (encoder_attention_mask is not None and 0 in encoder_attention_mask) + else None + ) + else: + # 4d mask is passed through the layers + if attention_mask is not None: + self_attention_mask = self_attention_mask * attention_mask.view(batch_size, 1, -1).to( + dtype=torch.bool, device=self_attention_mask.device + ) + + # MQA models: (batch_size, query_length, n_heads, key_length) + # MHA models: (batch_size, n_heads, query_length, key_length) + self_attention_mask = self_attention_mask.unsqueeze(2 if self.multi_query else 1) + + if self._use_sdpa and head_mask is None and not output_attentions: + # SDPA with a custom mask is much faster in fp16/fp32 dtype rather than bool. Cast here to floating point instead of at every layer. + dtype = self.wte.weight.dtype + min_dtype = torch.finfo(dtype).min + self_attention_mask = torch.where( + self_attention_mask, + torch.full([], 0.0, dtype=dtype, device=self_attention_mask.device), + torch.full([], min_dtype, dtype=dtype, device=self_attention_mask.device), + ) + + # output_attentions=True can not be supported when using SDPA, and we fall back on + # the manual implementation that requires a 4D causal mask in all cases. + if self.multi_query: + # gpt_bigcode using MQA has the bad taste to use a causal mask with shape + # [batch_size, target_length, 1, source_length], not compatible with SDPA, hence this transpose. + self_attention_mask = self_attention_mask.transpose(1, 2) + + if query_length > 1 and attention_mask is not None and attention_mask.device.type == "cuda": + # From PyTorch 2.1 onwards, F.scaled_dot_product_attention with the memory-efficient attention backend + # produces nans if sequences are completely unattended in the attention mask. 
Details: https://github.com/pytorch/pytorch/issues/110213 + self_attention_mask = AttentionMaskConverter._unmask_unattended( + self_attention_mask, min_dtype=min_dtype + ) + + attention_mask = self_attention_mask + + # If a 2D or 3D attention mask is provided for the cross-attention + # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] + if ( + self.config.add_cross_attention + and encoder_hidden_states is not None + and encoder_attention_mask is not None + ): + if encoder_attention_mask.dim() == 2: + encoder_attention_mask.unsqueeze(1) + assert encoder_attention_mask.dim() == 3 + encoder_attention_mask = encoder_attention_mask.bool().unsqueeze(2 if self.multi_query else 1) + else: + encoder_attention_mask = None + + # Prepare head mask if needed + # 1.0 in head_mask indicate we keep the head + # attention_probs has shape bsz x n_heads x N x N + # head_mask has shape n_layer x batch x n_heads x N x N + head_mask = self.get_head_mask(head_mask, self.config.n_layer) + + if inputs_embeds is None: + inputs_embeds = self.wte(input_ids) + position_embeds = self.wpe(position_ids) + hidden_states = inputs_embeds + position_embeds + + if token_type_ids is not None: + token_type_embeds = self.wte(token_type_ids) + hidden_states = hidden_states + token_type_embeds + + hidden_states = self.drop(hidden_states) + + output_shape = input_shape + (hidden_states.size(-1),) + + presents = [] if use_cache else None + all_self_attentions = () if output_attentions else None + all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None + all_hidden_states = () if output_hidden_states else None + for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)): + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + if self.gradient_checkpointing and self.training: + outputs = self._gradient_checkpointing_func( + block.__call__, + hidden_states, + None, + attention_mask, + head_mask[i], + encoder_hidden_states, + encoder_attention_mask, + use_cache, + output_attentions, + ) + else: + outputs = block( + hidden_states, + layer_past=layer_past, + attention_mask=attention_mask, + head_mask=head_mask[i], + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_attention_mask, + use_cache=use_cache, + output_attentions=output_attentions, + ) + + hidden_states = outputs[0] + if use_cache: + presents.append(outputs[1]) + + if output_attentions: + all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],) + if self.config.add_cross_attention: + all_cross_attentions = all_cross_attentions + (outputs[3 if use_cache else 2],) + + hidden_states = self.ln_f(hidden_states) + + hidden_states = hidden_states.view(output_shape) + # Add last hidden state + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + if not return_dict: + return tuple( + v + for v in [hidden_states, presents, all_hidden_states, all_self_attentions, all_cross_attentions] + if v is not None + ) + + return BaseModelOutputWithPastAndCrossAttentions( + last_hidden_state=hidden_states, + past_key_values=presents, + hidden_states=all_hidden_states, + attentions=all_self_attentions, + cross_attentions=all_cross_attentions, + ) + + +@add_start_docstrings( + """ + The GPT_BIGCODE Model transformer with a language modeling head on top (linear layer with weights tied to the input + embeddings). 
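+
+    Example (an illustrative sketch; `"bigcode/gpt_bigcode-santacoder"` is assumed here as a
+    compatible public checkpoint and can be swapped for any other GPTBigCode checkpoint):
+
+    ```python
+    >>> from transformers import AutoTokenizer, GPTBigCodeForCausalLM
+
+    >>> tokenizer = AutoTokenizer.from_pretrained("bigcode/gpt_bigcode-santacoder")
+    >>> model = GPTBigCodeForCausalLM.from_pretrained("bigcode/gpt_bigcode-santacoder")
+
+    >>> inputs = tokenizer("def fibonacci(n):", return_tensors="pt")
+    >>> generated = model.generate(**inputs, max_new_tokens=20)
+    >>> tokenizer.decode(generated[0], skip_special_tokens=True)
+    ```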
+ """, + GPT_BIGCODE_START_DOCSTRING, +) +class GPTBigCodeForCausalLM(GPTBigCodePreTrainedModel): + _tied_weights_keys = ["lm_head.weight"] + + def __init__(self, config): + super().__init__(config) + self.transformer = GPTBigCodeModel(config) + self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False) + + # Initialize weights and apply final processing + self.post_init() + + def get_output_embeddings(self): + return self.lm_head + + def set_output_embeddings(self, new_embeddings): + self.lm_head = new_embeddings + + def prepare_inputs_for_generation(self, input_ids, past_key_values=None, inputs_embeds=None, **kwargs): + token_type_ids = kwargs.get("token_type_ids", None) + # Omit tokens covered by past_key_values + if past_key_values: + if self.config.multi_query: + past_length = past_key_values[0].shape[1] + else: + past_length = past_key_values[0].shape[2] + + # Some generation methods already pass only the last input ID + if input_ids.shape[1] > past_length: + remove_prefix_length = past_length + else: + # Default to old behavior: keep only final ID + remove_prefix_length = input_ids.shape[1] - 1 + + input_ids = input_ids[:, remove_prefix_length:] + if token_type_ids is not None: + token_type_ids = token_type_ids[:, -input_ids.shape[1] :] + + attention_mask = kwargs.get("attention_mask", None) + position_ids = kwargs.get("position_ids", None) + + if attention_mask is not None and position_ids is None: + # create position_ids on the fly for batch generation + position_ids = attention_mask.long().cumsum(-1) - 1 + position_ids.masked_fill_(attention_mask == 0, 1) + if past_key_values: + position_ids = position_ids[:, -input_ids.shape[1] :] + else: + position_ids = None + + # if `inputs_embeds` are passed, we only want to use them in the 1st generation step + if inputs_embeds is not None and past_key_values is None: + model_inputs = {"inputs_embeds": inputs_embeds} + else: + model_inputs = {"input_ids": input_ids} + + model_inputs.update( + { + "past_key_values": past_key_values, + "use_cache": kwargs.get("use_cache"), + "position_ids": position_ids, + "attention_mask": attention_mask, + "token_type_ids": token_type_ids, + } + ) + return model_inputs + + @add_start_docstrings_to_model_forward(GPT_BIGCODE_INPUTS_DOCSTRING) + @add_code_sample_docstrings( + checkpoint=_CHECKPOINT_FOR_DOC, + output_type=CausalLMOutputWithCrossAttentions, + config_class=_CONFIG_FOR_DOC, + ) + def forward( + self, + input_ids: Optional[torch.Tensor] = None, + past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None, + attention_mask: Optional[torch.Tensor] = None, + token_type_ids: Optional[torch.Tensor] = None, + position_ids: Optional[torch.Tensor] = None, + head_mask: Optional[torch.Tensor] = None, + inputs_embeds: Optional[torch.Tensor] = None, + encoder_hidden_states: Optional[torch.Tensor] = None, + encoder_attention_mask: Optional[torch.Tensor] = None, + labels: Optional[torch.Tensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, CausalLMOutputWithCrossAttentions]: + r""" + labels (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. 
you can set + `labels = input_ids` Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100` + are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]` + """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + transformer_outputs = self.transformer( + input_ids, + past_key_values=past_key_values, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_attention_mask, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + hidden_states = transformer_outputs[0] + + lm_logits = self.lm_head(hidden_states) + + loss = None + if labels is not None: + # Shift so that tokens < n predict n + shift_logits = lm_logits[..., :-1, :].contiguous() + shift_labels = labels[..., 1:].contiguous().to(shift_logits.device) + # Flatten the tokens + loss_fct = CrossEntropyLoss() + loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1)) + + if not return_dict: + output = (lm_logits,) + transformer_outputs[1:] + return ((loss,) + output) if loss is not None else output + + return CausalLMOutputWithCrossAttentions( + loss=loss, + logits=lm_logits, + past_key_values=transformer_outputs.past_key_values, + hidden_states=transformer_outputs.hidden_states, + attentions=transformer_outputs.attentions, + cross_attentions=transformer_outputs.cross_attentions, + ) + + @staticmethod + def _reorder_cache( + past_key_values: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor + ) -> Tuple[Tuple[torch.Tensor]]: + """ + This function is used to re-order the `past_key_values` cache if [`~PreTrainedModel.beam_search`] or + [`~PreTrainedModel.beam_sample`] is called. This is required to match `past_key_values` with the correct + beam_idx at every generation step. + """ + return tuple(layer_past.index_select(0, beam_idx.to(layer_past.device)) for layer_past in past_key_values) + + +@add_start_docstrings( + """ + The GPTBigCode Model transformer with a sequence classification head on top (linear layer). + + [`GPTBigCodeForSequenceClassification`] uses the last token in order to do the classification, as other causal + models (e.g. GPT-1) do. + + Since it does classification on the last token, it requires to know the position of the last token. If a + `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If + no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the + padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in + each row of the batch). 
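+
+    Example (an illustrative sketch; the checkpoint name is an assumption, and the classification
+    head is freshly initialized unless a fine-tuned classification checkpoint is loaded):
+
+    ```python
+    >>> from transformers import AutoTokenizer, GPTBigCodeForSequenceClassification
+
+    >>> tokenizer = AutoTokenizer.from_pretrained("bigcode/gpt_bigcode-santacoder")
+    >>> model = GPTBigCodeForSequenceClassification.from_pretrained(
+    ...     "bigcode/gpt_bigcode-santacoder", num_labels=2
+    ... )
+
+    >>> # GPT-style tokenizers usually define no pad token; reuse EOS so batches can be padded.
+    >>> tokenizer.pad_token = tokenizer.eos_token
+    >>> model.config.pad_token_id = tokenizer.pad_token_id
+
+    >>> inputs = tokenizer(["def add(a, b):", "print('hi')"], padding=True, return_tensors="pt")
+    >>> logits = model(**inputs).logits  # pooled at the last non-padding token of each row
+    ```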
+ """, + GPT_BIGCODE_START_DOCSTRING, +) +class GPTBigCodeForSequenceClassification(GPTBigCodePreTrainedModel): + def __init__(self, config): + super().__init__(config) + self.num_labels = config.num_labels + self.transformer = GPTBigCodeModel(config) + self.score = nn.Linear(config.n_embd, self.num_labels, bias=False) + + # Initialize weights and apply final processing + self.post_init() + + @add_start_docstrings_to_model_forward(GPT_BIGCODE_INPUTS_DOCSTRING) + def forward( + self, + input_ids: Optional[torch.Tensor] = None, + past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None, + attention_mask: Optional[torch.Tensor] = None, + token_type_ids: Optional[torch.Tensor] = None, + position_ids: Optional[torch.Tensor] = None, + head_mask: Optional[torch.Tensor] = None, + inputs_embeds: Optional[torch.Tensor] = None, + labels: Optional[torch.Tensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, SequenceClassifierOutputWithPast]: + r""" + labels (`torch.Tensor` of shape `(batch_size,)`, *optional*): + Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., + config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If + `config.num_labels > 1` a classification loss is computed (Cross-Entropy). + """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + transformer_outputs = self.transformer( + input_ids, + past_key_values=past_key_values, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + hidden_states = transformer_outputs[0] + logits = self.score(hidden_states) + + if input_ids is not None: + batch_size, sequence_length = input_ids.shape[:2] + else: + batch_size, sequence_length = inputs_embeds.shape[:2] + + assert ( + self.config.pad_token_id is not None or batch_size == 1 + ), "Cannot handle batch sizes > 1 if no padding token is defined." + if self.config.pad_token_id is None: + sequence_lengths = -1 + else: + if input_ids is not None: + # if no pad token found, use modulo instead of reverse indexing for ONNX compatibility + sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1 + sequence_lengths = sequence_lengths % input_ids.shape[-1] + sequence_lengths = sequence_lengths.to(logits.device) + else: + sequence_lengths = -1 + logger.warning( + f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. 
Results may be " + "unexpected if using padding tokens in conjunction with `inputs_embeds.`" + ) + + pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths] + + loss = None + if labels is not None: + labels = labels.to(logits.device) + + if self.config.problem_type is None: + if self.num_labels == 1: + self.config.problem_type = "regression" + elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): + self.config.problem_type = "single_label_classification" + else: + self.config.problem_type = "multi_label_classification" + + if self.config.problem_type == "regression": + loss_fct = MSELoss() + if self.num_labels == 1: + loss = loss_fct(pooled_logits.squeeze(), labels.squeeze()) + else: + loss = loss_fct(pooled_logits, labels) + elif self.config.problem_type == "single_label_classification": + loss_fct = CrossEntropyLoss() + loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1)) + elif self.config.problem_type == "multi_label_classification": + loss_fct = BCEWithLogitsLoss() + loss = loss_fct(pooled_logits, labels) + if not return_dict: + output = (pooled_logits,) + transformer_outputs[1:] + return ((loss,) + output) if loss is not None else output + + return SequenceClassifierOutputWithPast( + loss=loss, + logits=pooled_logits, + past_key_values=transformer_outputs.past_key_values, + hidden_states=transformer_outputs.hidden_states, + attentions=transformer_outputs.attentions, + ) + + +@add_start_docstrings( + """ + GPT_BIGCODE Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. + for Named-Entity-Recognition (NER) tasks. + """, + GPT_BIGCODE_START_DOCSTRING, +) +class GPTBigCodeForTokenClassification(GPTBigCodePreTrainedModel): + def __init__(self, config): + super().__init__(config) + self.num_labels = config.num_labels + + self.transformer = GPTBigCodeModel(config) + if hasattr(config, "classifier_dropout") and config.classifier_dropout is not None: + classifier_dropout = config.classifier_dropout + elif hasattr(config, "hidden_dropout") and config.hidden_dropout is not None: + classifier_dropout = config.hidden_dropout + else: + classifier_dropout = 0.1 + self.dropout = nn.Dropout(classifier_dropout) + self.classifier = nn.Linear(config.hidden_size, config.num_labels) + + # Initialize weights and apply final processing + self.post_init() + + @add_start_docstrings_to_model_forward(GPT_BIGCODE_INPUTS_DOCSTRING) + def forward( + self, + input_ids: Optional[torch.Tensor] = None, + past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None, + attention_mask: Optional[torch.Tensor] = None, + token_type_ids: Optional[torch.Tensor] = None, + position_ids: Optional[torch.Tensor] = None, + head_mask: Optional[torch.Tensor] = None, + inputs_embeds: Optional[torch.Tensor] = None, + labels: Optional[torch.Tensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, TokenClassifierOutput]: + r""" + labels (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., + config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If + `config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
+ """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + transformer_outputs = self.transformer( + input_ids, + past_key_values=past_key_values, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + hidden_states = transformer_outputs[0] + hidden_states = self.dropout(hidden_states) + logits = self.classifier(hidden_states) + + loss = None + if labels is not None: + loss_fct = CrossEntropyLoss() + loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1).to(logits.device)) + + if not return_dict: + output = (logits,) + transformer_outputs[2:] + return ((loss,) + output) if loss is not None else output + + return TokenClassifierOutput( + loss=loss, + logits=logits, + hidden_states=transformer_outputs.hidden_states, + attentions=transformer_outputs.attentions, + ) diff --git a/venv/lib/python3.10/site-packages/transformers/models/gptj/__init__.py b/venv/lib/python3.10/site-packages/transformers/models/gptj/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..4e59ed4706204894516b966975dbbb88d462ab29 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/gptj/__init__.py @@ -0,0 +1,112 @@ +# Copyright 2021 The EleutherAI and HuggingFace Teams. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from typing import TYPE_CHECKING + +from ...utils import ( + OptionalDependencyNotAvailable, + _LazyModule, + is_flax_available, + is_tf_available, + is_torch_available, +) + + +_import_structure = {"configuration_gptj": ["GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTJConfig", "GPTJOnnxConfig"]} + +try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["modeling_gptj"] = [ + "GPTJ_PRETRAINED_MODEL_ARCHIVE_LIST", + "GPTJForCausalLM", + "GPTJForQuestionAnswering", + "GPTJForSequenceClassification", + "GPTJModel", + "GPTJPreTrainedModel", + ] + +try: + if not is_tf_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["modeling_tf_gptj"] = [ + "TFGPTJForCausalLM", + "TFGPTJForQuestionAnswering", + "TFGPTJForSequenceClassification", + "TFGPTJModel", + "TFGPTJPreTrainedModel", + ] + +try: + if not is_flax_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["modeling_flax_gptj"] = [ + "FlaxGPTJForCausalLM", + "FlaxGPTJModel", + "FlaxGPTJPreTrainedModel", + ] + + +if TYPE_CHECKING: + from .configuration_gptj import GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTJConfig, GPTJOnnxConfig + + try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .modeling_gptj import ( + GPTJ_PRETRAINED_MODEL_ARCHIVE_LIST, + GPTJForCausalLM, + GPTJForQuestionAnswering, + GPTJForSequenceClassification, + GPTJModel, + GPTJPreTrainedModel, + ) + + try: + if not is_tf_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .modeling_tf_gptj import ( + TFGPTJForCausalLM, + TFGPTJForQuestionAnswering, + TFGPTJForSequenceClassification, + TFGPTJModel, + TFGPTJPreTrainedModel, + ) + + try: + if not is_flax_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel, FlaxGPTJPreTrainedModel + +else: + import sys + + sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) diff --git a/venv/lib/python3.10/site-packages/transformers/models/gptj/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/gptj/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f3e6cf6dd215b62b375ae2f438607f86d126de76 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/gptj/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/gptj/__pycache__/configuration_gptj.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/gptj/__pycache__/configuration_gptj.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..56c59b0e729409fed845b01240bfb17ef41b01b6 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/gptj/__pycache__/configuration_gptj.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/gptj/__pycache__/modeling_flax_gptj.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/gptj/__pycache__/modeling_flax_gptj.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..9770df58dc5358f4e24ff5858a5c95f5605926d2 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/gptj/__pycache__/modeling_flax_gptj.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/gptj/__pycache__/modeling_gptj.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/gptj/__pycache__/modeling_gptj.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..482170846bde5a644f6fc469838310084edd93e7 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/gptj/__pycache__/modeling_gptj.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/gptj/__pycache__/modeling_tf_gptj.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/gptj/__pycache__/modeling_tf_gptj.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ef70cc6b8050dbbdb32e34793ae016d0989e5ed4 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/gptj/__pycache__/modeling_tf_gptj.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/gptj/configuration_gptj.py b/venv/lib/python3.10/site-packages/transformers/models/gptj/configuration_gptj.py new file mode 100644 index 0000000000000000000000000000000000000000..56d6042764a19a53f9181fed5a43f4c789361f8b --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/gptj/configuration_gptj.py @@ -0,0 +1,218 @@ +# coding=utf-8 +# Copyright 2021 The EleutherAI and HuggingFace Teams. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" GPT-J model configuration""" +from collections import OrderedDict +from typing import Any, List, Mapping, Optional + +from ... import PreTrainedTokenizer, TensorType, is_torch_available +from ...configuration_utils import PretrainedConfig +from ...onnx import OnnxConfigWithPast, PatchingSpec +from ...utils import logging + + +logger = logging.get_logger(__name__) + + +from ..deprecated._archive_maps import GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 + + +class GPTJConfig(PretrainedConfig): + r""" + This is the configuration class to store the configuration of a [`GPTJModel`]. It is used to instantiate a GPT-J + model according to the specified arguments, defining the model architecture. Instantiating a configuration with the + defaults will yield a similar configuration to that of the GPT-J + [EleutherAI/gpt-j-6B](https://huggingface.co/EleutherAI/gpt-j-6B) architecture. Configuration objects inherit from + [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] + for more information. + + Args: + vocab_size (`int`, *optional*, defaults to 50400): + Vocabulary size of the GPT-J model. Defines the number of different tokens that can be represented by the + `inputs_ids` passed when calling [`GPTJModel`]. 
+ n_positions (`int`, *optional*, defaults to 2048): + The maximum sequence length that this model might ever be used with. Typically set this to something large + just in case (e.g., 512 or 1024 or 2048). + n_embd (`int`, *optional*, defaults to 4096): + Dimensionality of the embeddings and hidden states. + n_layer (`int`, *optional*, defaults to 28): + Number of hidden layers in the Transformer encoder. + n_head (`int`, *optional*, defaults to 16): + Number of attention heads for each attention layer in the Transformer encoder. + rotary_dim (`int`, *optional*, defaults to 64): + Number of dimensions in the embedding that Rotary Position Embedding is applied to. + n_inner (`int`, *optional*, defaults to None): + Dimensionality of the inner feed-forward layers. `None` will set it to 4 times n_embd + activation_function (`str`, *optional*, defaults to `"gelu_new"`): + Activation function, to be selected in the list `["relu", "silu", "gelu", "tanh", "gelu_new"]`. + resid_pdrop (`float`, *optional*, defaults to 0.1): + The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. + embd_pdrop (`int`, *optional*, defaults to 0.1): + The dropout ratio for the embeddings. + attn_pdrop (`float`, *optional*, defaults to 0.1): + The dropout ratio for the attention. + layer_norm_epsilon (`float`, *optional*, defaults to 1e-5): + The epsilon to use in the layer normalization layers. + initializer_range (`float`, *optional*, defaults to 0.02): + The standard deviation of the truncated_normal_initializer for initializing all weight matrices. + use_cache (`bool`, *optional*, defaults to `True`): + Whether or not the model should return the last key/values attentions (not used by all models). + + Example: + + ```python + >>> from transformers import GPTJModel, GPTJConfig + + >>> # Initializing a GPT-J 6B configuration + >>> configuration = GPTJConfig() + + >>> # Initializing a model from the configuration + >>> model = GPTJModel(configuration) + + >>> # Accessing the model configuration + >>> configuration = model.config + ```""" + + model_type = "gptj" + attribute_map = { + "max_position_embeddings": "n_positions", + "hidden_size": "n_embd", + "num_attention_heads": "n_head", + "num_hidden_layers": "n_layer", + } + + def __init__( + self, + vocab_size=50400, + n_positions=2048, + n_embd=4096, + n_layer=28, + n_head=16, + rotary_dim=64, + n_inner=None, + activation_function="gelu_new", + resid_pdrop=0.0, + embd_pdrop=0.0, + attn_pdrop=0.0, + layer_norm_epsilon=1e-5, + initializer_range=0.02, + use_cache=True, + bos_token_id=50256, + eos_token_id=50256, + tie_word_embeddings=False, + **kwargs, + ): + self.vocab_size = vocab_size + self.n_positions = n_positions + self.n_embd = n_embd + self.n_layer = n_layer + self.n_head = n_head + self.n_inner = n_inner + self.rotary_dim = rotary_dim + self.activation_function = activation_function + self.resid_pdrop = resid_pdrop + self.embd_pdrop = embd_pdrop + self.attn_pdrop = attn_pdrop + self.layer_norm_epsilon = layer_norm_epsilon + self.initializer_range = initializer_range + self.use_cache = use_cache + + self.bos_token_id = bos_token_id + self.eos_token_id = eos_token_id + + super().__init__( + bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs + ) + + +# Copied from transformers.models.gpt2.configuration_gpt2.GPT2OnnxConfig +class GPTJOnnxConfig(OnnxConfigWithPast): + def __init__( + self, + config: PretrainedConfig, + task: str = "default", + patching_specs: 
List[PatchingSpec] = None, + use_past: bool = False, + ): + super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past) + if not getattr(self._config, "pad_token_id", None): + # TODO: how to do that better? + self._config.pad_token_id = 0 + + @property + def inputs(self) -> Mapping[str, Mapping[int, str]]: + common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}}) + if self.use_past: + self.fill_with_past_key_values_(common_inputs, direction="inputs") + common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"} + else: + common_inputs["attention_mask"] = {0: "batch", 1: "sequence"} + + return common_inputs + + @property + def num_layers(self) -> int: + return self._config.n_layer + + @property + def num_attention_heads(self) -> int: + return self._config.n_head + + def generate_dummy_inputs( + self, + tokenizer: PreTrainedTokenizer, + batch_size: int = -1, + seq_length: int = -1, + is_pair: bool = False, + framework: Optional[TensorType] = None, + ) -> Mapping[str, Any]: + common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs( + tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework + ) + + # We need to order the input in the way they appears in the forward() + ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]}) + + # Need to add the past_keys + if self.use_past: + if not is_torch_available(): + raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.") + else: + import torch + + batch, seqlen = common_inputs["input_ids"].shape + # Not using the same length for past_key_values + past_key_values_length = seqlen + 2 + past_shape = ( + batch, + self.num_attention_heads, + past_key_values_length, + self._config.hidden_size // self.num_attention_heads, + ) + ordered_inputs["past_key_values"] = [ + (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers) + ] + + ordered_inputs["attention_mask"] = common_inputs["attention_mask"] + if self.use_past: + mask_dtype = ordered_inputs["attention_mask"].dtype + ordered_inputs["attention_mask"] = torch.cat( + [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1 + ) + + return ordered_inputs + + @property + def default_onnx_opset(self) -> int: + return 13 diff --git a/venv/lib/python3.10/site-packages/transformers/models/gptj/modeling_flax_gptj.py b/venv/lib/python3.10/site-packages/transformers/models/gptj/modeling_flax_gptj.py new file mode 100644 index 0000000000000000000000000000000000000000..9f0d4d6e86000384544fa2873690b09d34a050a2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/gptj/modeling_flax_gptj.py @@ -0,0 +1,718 @@ +# coding=utf-8 +# Copyright 2021 The EleutherAI and The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from functools import partial +from typing import Optional, Tuple + +import flax.linen as nn +import jax +import jax.numpy as jnp +import numpy as np +from flax.core.frozen_dict import FrozenDict, freeze, unfreeze +from flax.linen import combine_masks, make_causal_mask +from flax.linen.attention import dot_product_attention_weights +from flax.traverse_util import flatten_dict, unflatten_dict +from jax import lax + +from ...modeling_flax_outputs import FlaxBaseModelOutput, FlaxCausalLMOutput +from ...modeling_flax_utils import ACT2FN, FlaxPreTrainedModel, append_call_sample_docstring +from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging +from .configuration_gptj import GPTJConfig + + +logger = logging.get_logger(__name__) + +_CHECKPOINT_FOR_DOC = "gptj" +_CONFIG_FOR_DOC = "GPTJConfig" + + +GPTJ_START_DOCSTRING = r""" + + This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the + library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads + etc.) + + This model is also a Flax Linen + [flax.nn.Module](https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html) subclass. Use it as a + regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior. + + Finally, this model supports inherent JAX features such as: + + - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit) + - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation) + - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap) + - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap) + + Parameters: + config ([`GPTJConfig`]): Model configuration class with all the parameters of the model. + Initializing with a config file does not load the weights associated with the model, only the + configuration. Check out the [`~FlaxPreTrainedModel.from_pretrained`] method to load the model weights. + dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`): + The data type of the computation. Can be one of `jax.numpy.float32`, `jax.numpy.float16` (on GPUs) and + `jax.numpy.bfloat16` (on TPUs). + + This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If + specified all the computation will be performed with the given `dtype`. + + **Note that this only specifies the dtype of the computation and does not influence the dtype of model + parameters.** + + If you wish to change the dtype of the model parameters, see [`~FlaxPreTrainedModel.to_fp16`] and + [`~FlaxPreTrainedModel.to_bf16`]. +""" + +GPTJ_INPUTS_DOCSTRING = r""" + Args: + input_ids (`numpy.ndarray` of shape `(batch_size, input_ids_length)`): + `input_ids_length` = `sequence_length`. Indices of input sequence tokens in the vocabulary. + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input-ids) + attention_mask (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. 
+ + [What are attention masks?](../glossary#attention-mask) + position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*): + Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, + config.max_position_embeddings - 1]`. + past_key_values (`Dict[str, np.ndarray]`, *optional*, returned by `init_cache` or when passing previous `past_key_values`): + Dictionary of pre-computed hidden-states (key and values in the attention blocks) that can be used for fast + auto-regressive decoding. Pre-computed key and value hidden-states are of shape *[batch_size, max_length]*. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. +""" + + +def create_sinusoidal_positions(num_pos, dim): + inv_freq = 1.0 / (10000 ** (np.arange(0, dim, 2) / dim)) + sinusoid_inp = np.einsum("i , j -> i j", np.arange(num_pos), inv_freq).astype("float32") + sin, cos = np.sin(sinusoid_inp), np.cos(sinusoid_inp) + + sentinel = dim // 2 + dim % 2 + out = np.zeros((num_pos, dim)) + out[:, 0:sentinel] = sin + out[:, sentinel:] = cos + + return jnp.array(out) + + +def rotate_every_two(tensor): + rotate_half_tensor = jnp.stack((-tensor[:, :, :, 1::2], tensor[:, :, :, ::2]), axis=-1) + rotate_half_tensor = rotate_half_tensor.reshape(rotate_half_tensor.shape[:-2] + (-1,)) + return rotate_half_tensor + + +def apply_rotary_pos_emb(tensor, sincos): + sin_pos, cos_pos = sincos + sin_pos = sin_pos[:, :, None, :].repeat(2, 3) + cos_pos = cos_pos[:, :, None, :].repeat(2, 3) + return (tensor * cos_pos) + (rotate_every_two(tensor) * sin_pos) + + +class FlaxGPTJAttention(nn.Module): + config: GPTJConfig + dtype: jnp.dtype = jnp.float32 + causal: bool = True + is_cross_attention: bool = False + + def setup(self): + config = self.config + self.embed_dim = config.hidden_size + self.num_heads = config.num_attention_heads + self.head_dim = self.embed_dim // self.num_heads + + self.rotary_dim = config.rotary_dim + + dense = partial( + nn.Dense, + self.embed_dim, + use_bias=False, + dtype=self.dtype, + kernel_init=jax.nn.initializers.normal(self.config.initializer_range), + ) + + self.q_proj, self.k_proj, self.v_proj = dense(), dense(), dense() + self.out_proj = dense() + + self.resid_dropout = nn.Dropout(rate=config.resid_pdrop) + + self.causal_mask = make_causal_mask(jnp.ones((1, config.max_position_embeddings), dtype="bool"), dtype="bool") + + pos_embd_dim = self.rotary_dim or self.embed_dim + self.embed_positions = create_sinusoidal_positions(config.max_position_embeddings, pos_embd_dim) + + def _split_heads(self, hidden_states): + return hidden_states.reshape(hidden_states.shape[:2] + (self.num_heads, self.head_dim)) + + def _merge_heads(self, hidden_states): + return hidden_states.reshape(hidden_states.shape[:2] + (self.embed_dim,)) + + @nn.compact + def _concatenate_to_cache(self, key, value, query, attention_mask): + """ + This function takes projected key, value states from a single input token and concatenates the states to cached + states from previous steps. 
This function is slighly adapted from the official Flax repository: + https://github.com/google/flax/blob/491ce18759622506588784b4fca0e4bf05f8c8cd/flax/linen/attention.py#L252 + """ + # detect if we're initializing by absence of existing cache data. + is_initialized = self.has_variable("cache", "cached_key") + cached_key = self.variable("cache", "cached_key", jnp.zeros, key.shape, key.dtype) + cached_value = self.variable("cache", "cached_value", jnp.zeros, value.shape, value.dtype) + cache_index = self.variable("cache", "cache_index", lambda: jnp.array(0, dtype=jnp.int32)) + + if is_initialized: + *batch_dims, max_length, num_heads, depth_per_head = cached_key.value.shape + # update key, value caches with our new 1d spatial slices + cur_index = cache_index.value + indices = (0,) * len(batch_dims) + (cur_index, 0, 0) + key = lax.dynamic_update_slice(cached_key.value, key, indices) + value = lax.dynamic_update_slice(cached_value.value, value, indices) + cached_key.value = key + cached_value.value = value + num_updated_cache_vectors = query.shape[1] + cache_index.value = cache_index.value + num_updated_cache_vectors + # causal mask for cached decoder self-attention: our single query position should only attend to those key + # positions that have already been generated and cached, not the remaining zero elements. + pad_mask = jnp.broadcast_to( + jnp.arange(max_length) < cur_index + num_updated_cache_vectors, + tuple(batch_dims) + (1, num_updated_cache_vectors, max_length), + ) + attention_mask = combine_masks(pad_mask, attention_mask) + return key, value, attention_mask + + def __call__( + self, + hidden_states, + attention_mask, + position_ids, + deterministic: bool = True, + init_cache: bool = False, + output_attentions: bool = False, + ): + query = self.q_proj(hidden_states) + key = self.k_proj(hidden_states) + value = self.v_proj(hidden_states) + + query = self._split_heads(query) + key = self._split_heads(key) + value = self._split_heads(value) + + sincos = jnp.take(self.embed_positions, position_ids, axis=0) + sincos = jnp.split(sincos, 2, axis=-1) + if self.rotary_dim is not None: + k_rot = key[:, :, :, : self.rotary_dim] + k_pass = key[:, :, :, self.rotary_dim :] + + q_rot = query[:, :, :, : self.rotary_dim] + q_pass = query[:, :, :, self.rotary_dim :] + + k_rot = apply_rotary_pos_emb(k_rot, sincos) + q_rot = apply_rotary_pos_emb(q_rot, sincos) + + key = jnp.concatenate([k_rot, k_pass], axis=-1) + query = jnp.concatenate([q_rot, q_pass], axis=-1) + else: + key = apply_rotary_pos_emb(key, sincos) + query = apply_rotary_pos_emb(query, sincos) + + query_length, key_length = query.shape[1], key.shape[1] + + if self.has_variable("cache", "cached_key"): + mask_shift = self.variables["cache"]["cache_index"] + max_decoder_length = self.variables["cache"]["cached_key"].shape[1] + causal_mask = lax.dynamic_slice( + self.causal_mask, (0, 0, mask_shift, 0), (1, 1, query_length, max_decoder_length) + ) + else: + causal_mask = self.causal_mask[:, :, :query_length, :key_length] + + batch_size = hidden_states.shape[0] + causal_mask = jnp.broadcast_to(causal_mask, (batch_size,) + causal_mask.shape[1:]) + + attention_mask = jnp.broadcast_to(jnp.expand_dims(attention_mask, axis=(-3, -2)), causal_mask.shape) + attention_mask = combine_masks(attention_mask, causal_mask) + + dropout_rng = None + if not deterministic and self.config.attn_pdrop > 0.0: + dropout_rng = self.make_rng("dropout") + + # During fast autoregressive decoding, we feed one position at a time, + # and cache the keys and values step by 
step. + if self.has_variable("cache", "cached_key") or init_cache: + key, value, attention_mask = self._concatenate_to_cache(key, value, query, attention_mask) + + # transform boolean mask into float mask + attention_bias = lax.select( + attention_mask > 0, + jnp.full(attention_mask.shape, 0.0).astype(self.dtype), + jnp.full(attention_mask.shape, jnp.finfo(self.dtype).min).astype(self.dtype), + ) + + # usual dot product attention + attn_weights = dot_product_attention_weights( + query, + key, + bias=attention_bias, + dropout_rng=dropout_rng, + dropout_rate=self.config.attn_pdrop, + deterministic=deterministic, + dtype=self.dtype, + precision=None, + ) + + attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value) + attn_output = self._merge_heads(attn_output) + attn_output = self.out_proj(attn_output) + attn_output = self.resid_dropout(attn_output, deterministic=deterministic) + + outputs = (attn_output, attn_weights) if output_attentions else (attn_output,) + return outputs + + +class FlaxGPTJMLP(nn.Module): + config: GPTJConfig + intermediate_size: int + dtype: jnp.dtype = jnp.float32 + + def setup(self): + embed_dim = self.config.hidden_size + kernel_init = jax.nn.initializers.normal(self.config.initializer_range) + + self.fc_in = nn.Dense(self.intermediate_size, dtype=self.dtype, kernel_init=kernel_init) + self.fc_out = nn.Dense(embed_dim, dtype=self.dtype, kernel_init=kernel_init) + + self.act = ACT2FN[self.config.activation_function] + self.dropout = nn.Dropout(rate=self.config.resid_pdrop) + + def __call__(self, hidden_states, deterministic: bool = True): + hidden_states = self.fc_in(hidden_states) + hidden_states = self.act(hidden_states) + hidden_states = self.fc_out(hidden_states) + hidden_states = self.dropout(hidden_states, deterministic=deterministic) + return hidden_states + + +class FlaxGPTJBlock(nn.Module): + config: GPTJConfig + dtype: jnp.dtype = jnp.float32 + + def setup(self): + hidden_size = self.config.hidden_size + inner_dim = self.config.n_inner if self.config.n_inner is not None else 4 * hidden_size + + self.ln_1 = nn.LayerNorm(epsilon=self.config.layer_norm_epsilon, dtype=self.dtype) + self.attn = FlaxGPTJAttention(self.config, dtype=self.dtype) + + self.mlp = FlaxGPTJMLP(self.config, inner_dim, dtype=self.dtype) + + def __call__( + self, + hidden_states, + attention_mask=None, + position_ids=None, + deterministic: bool = True, + init_cache: bool = False, + output_attentions: bool = False, + ): + residual = hidden_states + hidden_states = self.ln_1(hidden_states) + attn_outputs = self.attn( + hidden_states, + attention_mask=attention_mask, + position_ids=position_ids, + deterministic=deterministic, + init_cache=init_cache, + output_attentions=output_attentions, + ) + attn_output = attn_outputs[0] + + feed_forward_hidden_states = self.mlp(hidden_states, deterministic=deterministic) + # residual connection + hidden_states = attn_output + feed_forward_hidden_states + residual + + return (hidden_states,) + attn_outputs[1:] + + +class FlaxGPTJPreTrainedModel(FlaxPreTrainedModel): + """ + An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained + models. 
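A minimal loading sketch (this assumes the `EleutherAI/gpt-j-6B` checkpoint ships Flax weights; if only PyTorch weights are available, pass `from_pt=True` to convert them on the fly):

```python
>>> import jax.numpy as jnp
>>> from transformers import AutoTokenizer, FlaxGPTJModel

>>> tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-j-6B")
>>> # half precision on TPU; keep the default jnp.float32 otherwise
>>> model = FlaxGPTJModel.from_pretrained("EleutherAI/gpt-j-6B", dtype=jnp.bfloat16)

>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="np")
>>> outputs = model(**inputs)
>>> last_hidden_states = outputs.last_hidden_state
```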
+ """ + + config_class = GPTJConfig + base_model_prefix = "transformer" + module_class: nn.Module = None + + def __init__( + self, + config: GPTJConfig, + input_shape: Tuple = (1, 1), + seed: int = 0, + dtype: jnp.dtype = jnp.float32, + _do_init: bool = True, + **kwargs, + ): + module = self.module_class(config=config, dtype=dtype, **kwargs) + super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init) + + def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict: + # init input tensors + input_ids = jnp.zeros(input_shape, dtype="i4") + attention_mask = jnp.ones_like(input_ids) + position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_shape) + params_rng, dropout_rng = jax.random.split(rng) + rngs = {"params": params_rng, "dropout": dropout_rng} + + if self.config.add_cross_attention: + encoder_hidden_states = jnp.zeros(input_shape + (self.config.n_embd,)) + encoder_attention_mask = attention_mask + module_init_outputs = self.module.init( + rngs, + input_ids, + attention_mask, + position_ids, + encoder_hidden_states, + encoder_attention_mask, + return_dict=False, + ) + else: + module_init_outputs = self.module.init(rngs, input_ids, attention_mask, position_ids, return_dict=False) + + random_params = module_init_outputs["params"] + + if params is not None: + random_params = flatten_dict(unfreeze(random_params)) + params = flatten_dict(unfreeze(params)) + for missing_key in self._missing_keys: + params[missing_key] = random_params[missing_key] + self._missing_keys = set() + return freeze(unflatten_dict(params)) + else: + return random_params + + def init_cache(self, batch_size, max_length): + r""" + Args: + batch_size (`int`): + batch_size used for fast auto-regressive decoding. Defines the batch size of the initialized cache. + max_length (`int`): + maximum possible length for auto-regressive decoding. Defines the sequence length of the initialized + cache. 
+ """ + # init input variables to retrieve cache + input_ids = jnp.ones((batch_size, max_length)) + attention_mask = jnp.ones_like(input_ids) + position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape) + + init_variables = self.module.init( + jax.random.PRNGKey(0), input_ids, attention_mask, position_ids, return_dict=False, init_cache=True + ) + return init_variables["cache"] + + @add_start_docstrings_to_model_forward(GPTJ_INPUTS_DOCSTRING) + def __call__( + self, + input_ids, + attention_mask=None, + position_ids=None, + params: dict = None, + past_key_values: dict = None, + dropout_rng: jax.random.PRNGKey = None, + train: bool = False, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ): + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.return_dict + + batch_size, sequence_length = input_ids.shape + + if position_ids is None: + if past_key_values is not None: + raise ValueError("Make sure to provide `position_ids` when passing `past_key_values`.") + + position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)) + + if attention_mask is None: + attention_mask = jnp.ones((batch_size, sequence_length)) + + # Handle any PRNG if needed + rngs = {} + if dropout_rng is not None: + rngs["dropout"] = dropout_rng + + inputs = {"params": params or self.params} + + # if past_key_values are passed then cache is already initialized a private flag init_cache has to be passed down to ensure cache is used. 
It has to be made sure that cache is marked as mutable so that it can be changed by FlaxGPTJAttention module + if past_key_values: + inputs["cache"] = past_key_values + mutable = ["cache"] + else: + mutable = False + + outputs = self.module.apply( + inputs, + jnp.array(input_ids, dtype="i4"), + jnp.array(attention_mask, dtype="i4"), + jnp.array(position_ids, dtype="i4"), + not train, + False, + output_attentions, + output_hidden_states, + return_dict, + rngs=rngs, + mutable=mutable, + ) + + # add updated cache to model output + if past_key_values is not None and return_dict: + outputs, past_key_values = outputs + outputs["past_key_values"] = unfreeze(past_key_values["cache"]) + return outputs + elif past_key_values is not None and not return_dict: + outputs, past_key_values = outputs + outputs = outputs[:1] + (unfreeze(past_key_values["cache"]),) + outputs[1:] + + return outputs + + +class FlaxGPTJBlockCollection(nn.Module): + config: GPTJConfig + dtype: jnp.dtype = jnp.float32 + + def setup(self): + self.blocks = [ + FlaxGPTJBlock(self.config, name=str(i), dtype=self.dtype) for i in range(self.config.num_hidden_layers) + ] + + def __call__( + self, + hidden_states, + attention_mask=None, + position_ids=None, + deterministic: bool = True, + init_cache: bool = False, + output_attentions: bool = False, + output_hidden_states: bool = False, + return_dict: bool = True, + ): + all_attentions = () if output_attentions else None + all_hidden_states = () if output_hidden_states else None + + for block in self.blocks: + if output_hidden_states: + all_hidden_states += (hidden_states,) + + layer_outputs = block( + hidden_states, + attention_mask, + position_ids=position_ids, + deterministic=deterministic, + init_cache=init_cache, + output_attentions=output_attentions, + ) + hidden_states = layer_outputs[0] + + if output_attentions: + all_attentions += (layer_outputs[1],) + + # this contains possible `None` values - `FlaxGPTJModule` will filter them out + outputs = (hidden_states, all_hidden_states, all_attentions) + + return outputs + + +class FlaxGPTJModule(nn.Module): + config: GPTJConfig + dtype: jnp.dtype = jnp.float32 + + def setup(self): + self.embed_dim = self.config.hidden_size + + self.wte = nn.Embed( + self.config.vocab_size, + self.config.hidden_size, + embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range), + ) + self.dropout = nn.Dropout(rate=self.config.embd_pdrop) + self.h = FlaxGPTJBlockCollection(self.config, dtype=self.dtype) + self.ln_f = nn.LayerNorm(epsilon=self.config.layer_norm_epsilon, dtype=self.dtype) + + def __call__( + self, + input_ids, + attention_mask, + position_ids, + deterministic=True, + init_cache: bool = False, + output_attentions: bool = False, + output_hidden_states: bool = False, + return_dict: bool = True, + ): + input_embeds = self.wte(input_ids.astype("i4")) + + hidden_states = self.dropout(input_embeds, deterministic=deterministic) + + outputs = self.h( + hidden_states, + attention_mask, + position_ids=position_ids, + deterministic=deterministic, + init_cache=init_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + hidden_states = outputs[0] + hidden_states = self.ln_f(hidden_states) + + if output_hidden_states: + all_hidden_states = outputs[1] + (hidden_states,) + outputs = (hidden_states, all_hidden_states) + outputs[2:] + else: + outputs = (hidden_states,) + outputs[1:] + + if not return_dict: + return tuple(v for v in outputs if v is not None) + + return 
FlaxBaseModelOutput( + last_hidden_state=hidden_states, + hidden_states=outputs[1], + attentions=outputs[-1], + ) + + +@add_start_docstrings( + "The bare GPTJ Model transformer outputting raw hidden-states without any specific head on top.", + GPTJ_START_DOCSTRING, +) +class FlaxGPTJModel(FlaxGPTJPreTrainedModel): + module_class = FlaxGPTJModule + + +append_call_sample_docstring( + FlaxGPTJModel, + _CHECKPOINT_FOR_DOC, + FlaxCausalLMOutput, + _CONFIG_FOR_DOC, +) + + +class FlaxGPTJForCausalLMModule(nn.Module): + config: GPTJConfig + dtype: jnp.dtype = jnp.float32 + + def setup(self): + self.transformer = FlaxGPTJModule(self.config, dtype=self.dtype) + self.lm_head = nn.Dense( + self.config.vocab_size, + dtype=self.dtype, + kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range), + ) + + def __call__( + self, + input_ids, + attention_mask, + position_ids, + deterministic: bool = True, + init_cache: bool = False, + output_attentions: bool = False, + output_hidden_states: bool = False, + return_dict: bool = True, + ): + outputs = self.transformer( + input_ids, + attention_mask, + position_ids, + deterministic=deterministic, + init_cache=init_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + hidden_states = outputs[0] + + if self.config.tie_word_embeddings: + shared_kernel = self.transformer.variables["params"]["wte"]["embedding"].T + lm_logits = self.lm_head.apply({"params": {"kernel": shared_kernel}}, hidden_states) + else: + lm_logits = self.lm_head(hidden_states) + + if not return_dict: + return (lm_logits,) + outputs[1:] + + return FlaxCausalLMOutput(logits=lm_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) + + +@add_start_docstrings( + """ + The GPTJ Model transformer with a language modeling head on top. + """, + GPTJ_START_DOCSTRING, +) +class FlaxGPTJForCausalLM(FlaxGPTJPreTrainedModel): + module_class = FlaxGPTJForCausalLMModule + + def prepare_inputs_for_generation(self, input_ids, max_length, attention_mask: Optional[jax.Array] = None): + # initializing the cache + batch_size, seq_length = input_ids.shape + + past_key_values = self.init_cache(batch_size, max_length) + # Note that usually one would have to put 0's in the attention_mask for x > input_ids.shape[-1] and x < cache_length. + # But since GPTJ uses a causal mask, those positions are masked anyways. 
+ # Thus we can create a single static attention_mask here, which is more efficient for compilation + extended_attention_mask = jnp.ones((batch_size, max_length), dtype="i4") + if attention_mask is not None: + position_ids = attention_mask.cumsum(axis=-1) - 1 + extended_attention_mask = lax.dynamic_update_slice(extended_attention_mask, attention_mask, (0, 0)) + else: + position_ids = jnp.broadcast_to(jnp.arange(seq_length, dtype="i4")[None, :], (batch_size, seq_length)) + + return { + "past_key_values": past_key_values, + "attention_mask": extended_attention_mask, + "position_ids": position_ids, + } + + def update_inputs_for_generation(self, model_outputs, model_kwargs): + model_kwargs["past_key_values"] = model_outputs.past_key_values + model_kwargs["position_ids"] = model_kwargs["position_ids"][:, -1:] + 1 + return model_kwargs + + +append_call_sample_docstring( + FlaxGPTJForCausalLM, + _CHECKPOINT_FOR_DOC, + FlaxCausalLMOutput, + _CONFIG_FOR_DOC, +) diff --git a/venv/lib/python3.10/site-packages/transformers/models/gptj/modeling_gptj.py b/venv/lib/python3.10/site-packages/transformers/models/gptj/modeling_gptj.py new file mode 100644 index 0000000000000000000000000000000000000000..3c6ddac4ecf4ca057a17c7fb555912cfda29ad7e --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/gptj/modeling_gptj.py @@ -0,0 +1,1427 @@ +# coding=utf-8 +# Copyright 2021 The EleutherAI and HuggingFace Teams. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
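For illustration, the position-id and static-mask arithmetic used by the Flax generation helpers above, sketched with a small left-padded batch (all sizes are illustrative):

```python
import jax.numpy as jnp
from jax import lax

# Left-padded batch: 0 marks padding, 1 marks real tokens.
attention_mask = jnp.array([[0, 0, 1, 1]], dtype="i4")
position_ids = attention_mask.cumsum(axis=-1) - 1   # [[-1, -1, 0, 1]]: real tokens get 0, 1, ...

# Static full-length mask as built in prepare_inputs_for_generation, here with max_length = 6.
extended_attention_mask = jnp.ones((1, 6), dtype="i4")
extended_attention_mask = lax.dynamic_update_slice(extended_attention_mask, attention_mask, (0, 0))
print(extended_attention_mask)  # [[0 0 1 1 1 1]]
```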
+""" PyTorch GPT-J model.""" + +import warnings +from typing import Optional, Tuple, Union + +import torch +import torch.fx +import torch.nn.functional as F +import torch.utils.checkpoint +from torch import nn +from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss + +from ...activations import ACT2FN +from ...modeling_outputs import ( + BaseModelOutputWithPast, + CausalLMOutputWithPast, + QuestionAnsweringModelOutput, + SequenceClassifierOutputWithPast, +) +from ...modeling_utils import PreTrainedModel +from ...utils import ( + add_code_sample_docstrings, + add_start_docstrings, + add_start_docstrings_to_model_forward, + is_flash_attn_2_available, + is_flash_attn_greater_or_equal_2_10, + is_torch_fx_proxy, + logging, +) +from ...utils.model_parallel_utils import assert_device_map, get_device_map +from .configuration_gptj import GPTJConfig + + +if is_flash_attn_2_available(): + from flash_attn import flash_attn_func, flash_attn_varlen_func + from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input # noqa + + +logger = logging.get_logger(__name__) + +_CHECKPOINT_FOR_DOC = "hf-internal-testing/tiny-random-gptj" +_REAL_CHECKPOINT_FOR_DOC = "EleutherAI/gpt-j-6B" +_CONFIG_FOR_DOC = "GPTJConfig" + + +from ..deprecated._archive_maps import GPTJ_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 + + +# Copied from transformers.models.llama.modeling_llama._get_unpad_data +def _get_unpad_data(attention_mask): + seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32) + indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten() + max_seqlen_in_batch = seqlens_in_batch.max().item() + cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0)) + return ( + indices, + cu_seqlens, + max_seqlen_in_batch, + ) + + +def create_sinusoidal_positions(num_pos: int, dim: int) -> torch.Tensor: + inv_freq = 1.0 / (10000 ** (torch.arange(0, dim, 2, dtype=torch.int64) / dim)) + sinusoid_inp = torch.einsum("i , j -> i j", torch.arange(num_pos, dtype=torch.int64).float(), inv_freq).float() + return torch.cat((torch.sin(sinusoid_inp), torch.cos(sinusoid_inp)), dim=1) + + +@torch.fx.wrap +def get_embed_positions(embed_positions, position_ids): + return embed_positions.to(position_ids.device).repeat(position_ids.shape[0], 1, 1) + + +def rotate_every_two(x: torch.Tensor) -> torch.Tensor: + x1 = x[:, :, :, ::2] + x2 = x[:, :, :, 1::2] + x = torch.stack((-x2, x1), dim=-1) + return x.flatten(-2) # in einsum notation: rearrange(x, '... d j -> ... 
(d j)') + + +def apply_rotary_pos_emb(tensor: torch.Tensor, sin: torch.Tensor, cos: torch.Tensor) -> torch.Tensor: + sin = torch.repeat_interleave(sin[:, :, None, :], 2, 3) + cos = torch.repeat_interleave(cos[:, :, None, :], 2, 3) + return (tensor * cos) + (rotate_every_two(tensor) * sin) + + +class GPTJAttention(nn.Module): + def __init__(self, config): + super().__init__() + self.config = config + max_positions = config.max_position_embeddings + self.register_buffer( + "bias", + torch.tril(torch.ones((max_positions, max_positions), dtype=torch.bool)).view( + 1, 1, max_positions, max_positions + ), + persistent=False, + ) + self.register_buffer("masked_bias", torch.tensor(-1e9), persistent=False) + + self.attn_dropout = nn.Dropout(config.attn_pdrop) + self.resid_dropout = nn.Dropout(config.resid_pdrop) + + self.is_causal = True + + self.embed_dim = config.hidden_size + self.num_attention_heads = config.num_attention_heads + self.head_dim = self.embed_dim // self.num_attention_heads + if self.head_dim * self.num_attention_heads != self.embed_dim: + raise ValueError( + f"embed_dim must be divisible by num_attention_heads (got `embed_dim`: {self.embed_dim} and" + f" `num_attention_heads`: {self.num_attention_heads})." + ) + self.scale_attn = torch.sqrt(torch.tensor(self.head_dim, dtype=torch.float32)).to(torch.get_default_dtype()) + + self.k_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=False) + self.v_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=False) + self.q_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=False) + self.out_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=False) + self.rotary_dim = config.rotary_dim + pos_embd_dim = self.rotary_dim or self.embed_dim + self.embed_positions = create_sinusoidal_positions(max_positions, pos_embd_dim) + + def _split_heads(self, tensor, num_attention_heads, attn_head_size, rotary): + """ + Splits hidden dim into attn_head_size and num_attention_heads + """ + new_shape = tensor.size()[:-1] + (num_attention_heads, attn_head_size) + tensor = tensor.view(new_shape) + if rotary: + return tensor + if len(tensor.shape) == 5: + return tensor.permute(0, 1, 3, 2, 4) # (batch, blocks, head, block_length, head_features) + elif len(tensor.shape) == 4: + return tensor.permute(0, 2, 1, 3) # (batch, head, seq_length, head_features) + else: + raise ValueError(f"Input tensor rank should be one of [4, 5], but is: {len(tensor.shape)}") + + def _merge_heads(self, tensor, num_attention_heads, attn_head_size): + """ + Merges attn_head_size dim and num_attn_heads dim into hidden dim + """ + if len(tensor.shape) == 5: + tensor = tensor.permute(0, 1, 3, 2, 4).contiguous() + elif len(tensor.shape) == 4: + tensor = tensor.permute(0, 2, 1, 3).contiguous() + else: + raise ValueError(f"Input tensor rank should be one of [4, 5], but is: {len(tensor.shape)}") + new_shape = tensor.size()[:-2] + (num_attention_heads * attn_head_size,) + return tensor.view(new_shape) + + def _attn( + self, + query, + key, + value, + attention_mask=None, + head_mask=None, + ): + # compute causal mask from causal mask buffer + query_length, key_length = query.size(-2), key.size(-2) + causal_mask = self.bias[:, :, key_length - query_length : key_length, :key_length] + + # Keep the attention weights computation in fp32 to avoid overflow issues + query = query.to(torch.float32) + key = key.to(torch.float32) + + attn_weights = torch.matmul(query, key.transpose(-1, -2)) + + mask_value = torch.finfo(attn_weights.dtype).min + # Need to be a tensor, otherwise we get error: 
`RuntimeError: expected scalar type float but found double`. + # Need to be on the same device, otherwise `RuntimeError: ..., x and y to be on the same device` + mask_value = torch.tensor(mask_value, dtype=attn_weights.dtype).to(attn_weights.device) + attn_weights = torch.where(causal_mask, attn_weights, mask_value) + + attn_weights = attn_weights / self.scale_attn + + if attention_mask is not None: + # Apply the attention mask + attn_weights = attn_weights + attention_mask + + attn_weights = nn.functional.softmax(attn_weights, dim=-1) + attn_weights = attn_weights.to(value.dtype) + attn_weights = self.attn_dropout(attn_weights) + + # Mask heads if we want to + if head_mask is not None: + attn_weights = attn_weights * head_mask + + attn_output = torch.matmul(attn_weights, value) + + return attn_output, attn_weights + + def _get_embed_positions(self, position_ids): + embed_positions = self.embed_positions + if embed_positions.device != position_ids.device: + embed_positions = embed_positions.to(position_ids.device) + self.embed_positions = embed_positions + return embed_positions.repeat(position_ids.shape[0], 1, 1) + + def forward( + self, + hidden_states: torch.FloatTensor, + layer_past: Optional[Tuple[torch.Tensor]] = None, + attention_mask: Optional[torch.FloatTensor] = None, + position_ids: Optional[torch.LongTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + use_cache: Optional[bool] = False, + output_attentions: Optional[bool] = False, + ) -> Union[ + Tuple[torch.Tensor, Tuple[torch.Tensor]], + Optional[Tuple[torch.Tensor, Tuple[torch.Tensor], Tuple[torch.Tensor, ...]]], + ]: + query = self.q_proj(hidden_states) + key = self.k_proj(hidden_states) + value = self.v_proj(hidden_states) + + query = self._split_heads(query, self.num_attention_heads, self.head_dim, True) + key = self._split_heads(key, self.num_attention_heads, self.head_dim, True) + value = self._split_heads(value, self.num_attention_heads, self.head_dim, False) + + if is_torch_fx_proxy(position_ids) or torch.jit.is_tracing(): + # The logic to conditionally copy to GPU could not be traced, so we do this + # every time in the torch.fx case + embed_positions = get_embed_positions(self.embed_positions, position_ids) + else: + embed_positions = self._get_embed_positions(position_ids) + + repeated_position_ids = position_ids.unsqueeze(-1).repeat(1, 1, embed_positions.shape[-1]) + sincos = torch.gather(embed_positions, 1, repeated_position_ids) + sin, cos = torch.split(sincos, sincos.shape[-1] // 2, dim=-1) + + if self.rotary_dim is not None: + k_rot = key[:, :, :, : self.rotary_dim] + k_pass = key[:, :, :, self.rotary_dim :] + + q_rot = query[:, :, :, : self.rotary_dim] + q_pass = query[:, :, :, self.rotary_dim :] + + k_rot = apply_rotary_pos_emb(k_rot, sin, cos) + q_rot = apply_rotary_pos_emb(q_rot, sin, cos) + + key = torch.cat([k_rot, k_pass], dim=-1) + query = torch.cat([q_rot, q_pass], dim=-1) + else: + key = apply_rotary_pos_emb(key, sin, cos) + query = apply_rotary_pos_emb(query, sin, cos) + + key = key.permute(0, 2, 1, 3) + query = query.permute(0, 2, 1, 3) + + if layer_past is not None: + past_key = layer_past[0] + past_value = layer_past[1] + key = torch.cat((past_key, key), dim=-2) + value = torch.cat((past_value, value), dim=-2) + + if use_cache is True: + # Note that this cast is quite ugly, but is not implemented before ROPE as the original codebase keeps the key in float32 all along the computation. 
+ # Reference: https://github.com/kingoflolz/mesh-transformer-jax/blob/f8315e3003033b23f21d78361b288953064e0e76/mesh_transformer/layers.py#L128 + present = (key.to(hidden_states.dtype), value) + else: + present = None + + # compute self-attention: V x Softmax(QK^T) + attn_output, attn_weights = self._attn(query, key, value, attention_mask, head_mask) + + attn_output = self._merge_heads(attn_output, self.num_attention_heads, self.head_dim) + attn_output = self.out_proj(attn_output) + attn_output = self.resid_dropout(attn_output) + + outputs = (attn_output, present) + if output_attentions: + outputs += (attn_weights,) + + return outputs # a, present, (attentions) + + +class GPTJFlashAttention2(GPTJAttention): + """ + GPTJ flash attention module. This module inherits from `GPTJAttention` as the weights of the module stays + untouched. The only required change would be on the forward pass where it needs to correctly call the public API of + flash attention and deal with padding tokens in case the input contains any of them. + """ + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1. + # flash_attn<2.1 generates top-left aligned causal mask, while what is needed here is bottom-right alignement, that was made default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0. + # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left). + self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10() + + def forward( + self, + hidden_states: torch.FloatTensor, + layer_past: Optional[Tuple[torch.Tensor]] = None, + attention_mask: Optional[torch.FloatTensor] = None, + position_ids: Optional[torch.LongTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + use_cache: Optional[bool] = False, + output_attentions: Optional[bool] = False, + ) -> Union[ + Tuple[torch.Tensor, Tuple[torch.Tensor]], + Optional[Tuple[torch.Tensor, Tuple[torch.Tensor], Tuple[torch.Tensor, ...]]], + ]: + query = self.q_proj(hidden_states) + key = self.k_proj(hidden_states) + value = self.v_proj(hidden_states) + + query = self._split_heads(query, self.num_attention_heads, self.head_dim, True) + key = self._split_heads(key, self.num_attention_heads, self.head_dim, True) + value = self._split_heads(value, self.num_attention_heads, self.head_dim, False) + + if is_torch_fx_proxy(position_ids) or torch.jit.is_tracing(): + # The logic to conditionally copy to GPU could not be traced, so we do this + # every time in the torch.fx case + embed_positions = get_embed_positions(self.embed_positions, position_ids) + else: + embed_positions = self._get_embed_positions(position_ids) + + repeated_position_ids = position_ids.unsqueeze(-1).repeat(1, 1, embed_positions.shape[-1]) + sincos = torch.gather(embed_positions, 1, repeated_position_ids) + sin, cos = torch.split(sincos, sincos.shape[-1] // 2, dim=-1) + + if self.rotary_dim is not None: + k_rot = key[:, :, :, : self.rotary_dim] + k_pass = key[:, :, :, self.rotary_dim :] + + q_rot = query[:, :, :, : self.rotary_dim] + q_pass = query[:, :, :, self.rotary_dim :] + + k_rot = apply_rotary_pos_emb(k_rot, sin, cos) + q_rot = apply_rotary_pos_emb(q_rot, sin, cos) + + key = torch.cat([k_rot, k_pass], dim=-1) + query = torch.cat([q_rot, q_pass], dim=-1) + else: + key = apply_rotary_pos_emb(key, sin, cos) + query = 
apply_rotary_pos_emb(query, sin, cos) + + # tanspose to have the desired shape + # before transpose: batch_size x seq_length x num_attention_heads x head_dim + # after transpose: batch_size x num_attention_heads x seq_length x head_dim + key = key.permute(0, 2, 1, 3) + query = query.permute(0, 2, 1, 3) + # value: batch_size x num_attention_heads x seq_length x head_dim + + if layer_past is not None: + past_key = layer_past[0] + past_value = layer_past[1] + key = torch.cat((past_key, key), dim=-2) + value = torch.cat((past_value, value), dim=-2) + + if use_cache is True: + # Note that this cast is quite ugly, but is not implemented before ROPE as the original codebase keeps the key in float32 all along the computation. + # Reference: https://github.com/kingoflolz/mesh-transformer-jax/blob/f8315e3003033b23f21d78361b288953064e0e76/mesh_transformer/layers.py#L128 + present = (key.to(hidden_states.dtype), value) + else: + present = None + + # The Flash attention requires the input to have the shape + # batch_size x seq_length x head_dim x hidden_dim + # therefore we need to keep the original shape for query and key, and reshape value + # to have the correct shape. + key = key.permute(0, 2, 1, 3).contiguous() + query = query.permute(0, 2, 1, 3).contiguous() + value = value.permute(0, 2, 1, 3).contiguous() + + # In PEFT, usually we cast the layer norms in float32 for training stability reasons + # therefore the input hidden states gets silently casted in float32. Hence, we need + # cast them back in the correct dtype just to be sure everything works as expected. + # This might slowdown training & inference so it is recommended to not cast the LayerNorms + # in fp32. (LlamaRMSNorm handles it correctly) + + input_dtype = query.dtype + if input_dtype == torch.float32: + if torch.is_autocast_enabled(): + target_dtype = torch.get_autocast_gpu_dtype() + # Handle the case where the model is quantized + elif hasattr(self.config, "_pre_quantization_dtype"): + target_dtype = self.config._pre_quantization_dtype + else: + target_dtype = self.q_proj.weight.dtype + + logger.warning_once( + f"The input hidden states seems to be silently casted in float32, this might be related to" + f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in" + f" {target_dtype}." + ) + + query = query.to(target_dtype) + key = key.to(target_dtype) + value = value.to(target_dtype) + + attention_dropout = self.config.attn_pdrop if self.training else 0.0 # attn_pdrop in gptj + + query_length = query.shape[1] + + # Compute attention + attn_weights = self._flash_attention_forward( + query, + key, + value, + attention_mask, + query_length, + dropout=attention_dropout, + ) + + # Reshape outputs + attn_output = attn_weights.reshape( + attn_weights.shape[0], attn_weights.shape[1], attn_weights.shape[2] * attn_weights.shape[3] + ) + attn_output = self.out_proj(attn_output) + attn_output = self.resid_dropout(attn_output) + + outputs = (attn_output, present) + if output_attentions: + outputs += (attn_weights,) + + return outputs + + # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2._flash_attention_forward + def _flash_attention_forward( + self, query_states, key_states, value_states, attention_mask, query_length, dropout=0.0, softmax_scale=None + ): + """ + Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token + first unpad the input, then computes the attention scores and pad the final attention scores. 
+ + Args: + query_states (`torch.Tensor`): + Input query states to be passed to Flash Attention API + key_states (`torch.Tensor`): + Input key states to be passed to Flash Attention API + value_states (`torch.Tensor`): + Input value states to be passed to Flash Attention API + attention_mask (`torch.Tensor`): + The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the + position of padding tokens and 1 for the position of non-padding tokens. + dropout (`float`): + Attention dropout + softmax_scale (`float`, *optional*): + The scaling of QK^T before applying softmax. Default to 1 / sqrt(head_dim) + """ + if not self._flash_attn_uses_top_left_mask: + causal = self.is_causal + else: + # TODO: Remove the `query_length != 1` check once Flash Attention for RoCm is bumped to 2.1. For details, please see the comment in LlamaFlashAttention2 __init__. + causal = self.is_causal and query_length != 1 + + # Contains at least one padding token in the sequence + if attention_mask is not None: + batch_size = query_states.shape[0] + query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input( + query_states, key_states, value_states, attention_mask, query_length + ) + + cu_seqlens_q, cu_seqlens_k = cu_seq_lens + max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens + + attn_output_unpad = flash_attn_varlen_func( + query_states, + key_states, + value_states, + cu_seqlens_q=cu_seqlens_q, + cu_seqlens_k=cu_seqlens_k, + max_seqlen_q=max_seqlen_in_batch_q, + max_seqlen_k=max_seqlen_in_batch_k, + dropout_p=dropout, + softmax_scale=softmax_scale, + causal=causal, + ) + + attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length) + else: + attn_output = flash_attn_func( + query_states, key_states, value_states, dropout, softmax_scale=softmax_scale, causal=causal + ) + + return attn_output + + # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2._upad_input with num_heads->num_attention_heads + def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length): + indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask) + batch_size, kv_seq_len, num_key_value_heads, head_dim = key_layer.shape + + key_layer = index_first_axis( + key_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k + ) + value_layer = index_first_axis( + value_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k + ) + if query_length == kv_seq_len: + query_layer = index_first_axis( + query_layer.reshape(batch_size * kv_seq_len, self.num_attention_heads, head_dim), indices_k + ) + cu_seqlens_q = cu_seqlens_k + max_seqlen_in_batch_q = max_seqlen_in_batch_k + indices_q = indices_k + elif query_length == 1: + max_seqlen_in_batch_q = 1 + cu_seqlens_q = torch.arange( + batch_size + 1, dtype=torch.int32, device=query_layer.device + ) # There is a memcpy here, that is very bad. + indices_q = cu_seqlens_q[:-1] + query_layer = query_layer.squeeze(1) + else: + # The -q_len: slice assumes left padding. 
+ attention_mask = attention_mask[:, -query_length:] + query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask) + + return ( + query_layer, + key_layer, + value_layer, + indices_q, + (cu_seqlens_q, cu_seqlens_k), + (max_seqlen_in_batch_q, max_seqlen_in_batch_k), + ) + + +GPTJ_ATTENTION_CLASSES = { + "eager": GPTJAttention, + "flash_attention_2": GPTJFlashAttention2, +} + + +class GPTJMLP(nn.Module): + def __init__(self, intermediate_size, config): # in MLP: intermediate_size= 4 * embed_dim + super().__init__() + embed_dim = config.n_embd + + self.fc_in = nn.Linear(embed_dim, intermediate_size) + self.fc_out = nn.Linear(intermediate_size, embed_dim) + + self.act = ACT2FN[config.activation_function] + self.dropout = nn.Dropout(config.resid_pdrop) + + def forward(self, hidden_states: Optional[torch.FloatTensor]) -> torch.FloatTensor: + hidden_states = self.fc_in(hidden_states) + hidden_states = self.act(hidden_states) + hidden_states = self.fc_out(hidden_states) + hidden_states = self.dropout(hidden_states) + return hidden_states + + +class GPTJBlock(nn.Module): + def __init__(self, config): + super().__init__() + inner_dim = config.n_inner if config.n_inner is not None else 4 * config.n_embd + self.ln_1 = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon) + self.attn = GPTJ_ATTENTION_CLASSES[config._attn_implementation](config) + self.mlp = GPTJMLP(inner_dim, config) + + def forward( + self, + hidden_states: Optional[torch.FloatTensor], + layer_past: Optional[Tuple[torch.Tensor]] = None, + attention_mask: Optional[torch.FloatTensor] = None, + position_ids: Optional[torch.LongTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + use_cache: Optional[bool] = False, + output_attentions: Optional[bool] = False, + ) -> Union[Tuple[torch.Tensor], Optional[Tuple[torch.Tensor, Tuple[torch.FloatTensor, ...]]]]: + residual = hidden_states + hidden_states = self.ln_1(hidden_states) + attn_outputs = self.attn( + hidden_states=hidden_states, + layer_past=layer_past, + attention_mask=attention_mask, + position_ids=position_ids, + head_mask=head_mask, + use_cache=use_cache, + output_attentions=output_attentions, + ) + attn_output = attn_outputs[0] # output_attn: a, present, (attentions) + outputs = attn_outputs[1:] + + feed_forward_hidden_states = self.mlp(hidden_states) + hidden_states = attn_output + feed_forward_hidden_states + residual + + if use_cache: + outputs = (hidden_states,) + outputs + else: + outputs = (hidden_states,) + outputs[1:] + + return outputs # hidden_states, present, (attentions) + + +class GPTJPreTrainedModel(PreTrainedModel): + """ + An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained + models. 
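A minimal loading sketch, assuming a CUDA device and an installed `flash-attn` package (drop `attn_implementation` to fall back to the default eager attention):

```python
>>> import torch
>>> from transformers import AutoTokenizer, GPTJForCausalLM

>>> tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-j-6B")
>>> model = GPTJForCausalLM.from_pretrained(
...     "EleutherAI/gpt-j-6B",
...     torch_dtype=torch.float16,
...     attn_implementation="flash_attention_2",
... ).to("cuda")

>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt").to("cuda")
>>> generated = model.generate(**inputs, max_new_tokens=20)
>>> print(tokenizer.decode(generated[0], skip_special_tokens=True))
```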
+ """ + + config_class = GPTJConfig + base_model_prefix = "transformer" + is_parallelizable = True + supports_gradient_checkpointing = True + _no_split_modules = ["GPTJBlock"] + _skip_keys_device_placement = "past_key_values" + _supports_flash_attn_2 = True + + def __init__(self, *inputs, **kwargs): + super().__init__(*inputs, **kwargs) + + def _init_weights(self, module): + """Initialize the weights.""" + if isinstance(module, (nn.Linear,)): + # Slightly different from Mesh Transformer JAX which uses truncated_normal for initialization + # cf https://github.com/pytorch/pytorch/pull/5617 + module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) + if module.bias is not None: + module.bias.data.zero_() + elif isinstance(module, nn.Embedding): + module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) + if module.padding_idx is not None: + module.weight.data[module.padding_idx].zero_() + elif isinstance(module, nn.LayerNorm): + module.bias.data.zero_() + module.weight.data.fill_(1.0) + + +GPTJ_START_DOCSTRING = r""" + This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use + it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and + behavior. + + Parameters: + config ([`GPTJConfig`]): Model configuration class with all the parameters of the model. + Initializing with a config file does not load the weights associated with the model, only the + configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. +""" + +GPTJ_INPUTS_DOCSTRING = r""" + Args: + input_ids (`torch.LongTensor` of shape `({0})`): + Indices of input sequence tokens in the vocabulary. + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input-ids) + attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*): + Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, + 1]`: + + - 0 corresponds to a *sentence A* token, + - 1 corresponds to a *sentence B* token. + + [What are token type IDs?](../glossary#token-type-ids) + position_ids (`torch.LongTensor` of shape `({0})`, *optional*): + Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, + config.n_positions - 1]`. + + [What are position IDs?](../glossary#position-ids) + head_mask (`torch.FloatTensor` of shape `(num_attention_heads,)` or `(n_layer, num_attention_heads)`, *optional*): + Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + + inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_dim)`, *optional*): + Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This + is useful if you want more control over how to convert *input_ids* indices into associated vectors than the + model's internal embedding lookup matrix. 
+ output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. +""" + +PARALLELIZE_DOCSTRING = r""" + This is an experimental feature and is a subject to change at a moment's notice. Uses a device map to distribute + attention modules of the model across several devices. If no device map is given, it will evenly distribute blocks + across all devices. + + Args: + device_map (`Dict[int, list]`, optional, defaults to None): + A dictionary that maps attention modules to devices. Note that the embedding module and LMHead are always + automatically mapped to the first device (for esoteric reasons). That means that the first device should + have fewer attention modules mapped to it than other devices. For reference, the GPT-J models have the + following number of attention modules: + + - gpt-j-6B: 28 + + Example: + + ```python + # Here is an example of a device map on a machine with 4 GPUs using gpt-j-6B, which has a total of 28 attention modules: + model = GPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B") + device_map = { + 0: [0, 1, 2, 3, 4, 5, 6], + 1: [7, 8, 9, 10, 11, 12, 13], + 2: [14, 15, 16, 17, 18, 19, 20], + 3: [21, 22, 23, 24, 25, 26, 27], + } + model.parallelize(device_map) + ``` +""" + +DEPARALLELIZE_DOCSTRING = r""" + Moves the model to CPU from a model parallel state. + + Example: + + ```python + # On a 4 GPU machine with gpt-j-6B: + model = GPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B") + device_map = { + 0: [0, 1, 2, 3, 4, 5, 6], + 1: [7, 8, 9, 10, 11, 12, 13], + 2: [14, 15, 16, 17, 18, 19, 20], + 3: [21, 22, 23, 24, 25, 26, 27], + } + model.parallelize(device_map) # Splits the model across several devices + model.deparallelize() # Put the model back on cpu and cleans memory by calling torch.cuda.empty_cache() + ``` +""" + + +@add_start_docstrings( + "The bare GPT-J Model transformer outputting raw hidden-states without any specific head on top.", + GPTJ_START_DOCSTRING, +) +class GPTJModel(GPTJPreTrainedModel): + def __init__(self, config): + super().__init__(config) + + self.embed_dim = config.n_embd + self.vocab_size = config.vocab_size + self.wte = nn.Embedding(config.vocab_size, self.embed_dim) + self.drop = nn.Dropout(config.embd_pdrop) + self.h = nn.ModuleList([GPTJBlock(config) for _ in range(config.n_layer)]) + self.ln_f = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon) + + # Model parallel + self.model_parallel = False + self.device_map = None + self.gradient_checkpointing = False + + # Initialize weights and apply final processing + self.post_init() + + self._use_flash_attention_2 = config._attn_implementation == "flash_attention_2" + + @add_start_docstrings(PARALLELIZE_DOCSTRING) + def parallelize(self, device_map=None): + warnings.warn( + "`GPTJModel.parallelize` is deprecated and will be removed in v5 of Transformers, you should load your" + " model with `device_map='balanced'` in the call to `from_pretrained`. 
You can also provide your own" + " `device_map` but it needs to be a dictionary module_name to device, so for instance {'h.0': 0, 'h.1': 1," + " ...}", + FutureWarning, + ) + # Check validity of device_map + self.device_map = ( + get_device_map(len(self.h), range(torch.cuda.device_count())) if device_map is None else device_map + ) + assert_device_map(self.device_map, len(self.h)) + self.model_parallel = True + self.first_device = "cpu" if "cpu" in self.device_map.keys() else "cuda:" + str(min(self.device_map.keys())) + self.last_device = "cuda:" + str(max(self.device_map.keys())) + self.wte = self.wte.to(self.first_device) + # Load onto devices + for k, v in self.device_map.items(): + for block in v: + cuda_device = "cuda:" + str(k) + self.h[block] = self.h[block].to(cuda_device) + # ln_f to last + self.ln_f = self.ln_f.to(self.last_device) + + @add_start_docstrings(DEPARALLELIZE_DOCSTRING) + def deparallelize(self): + warnings.warn( + "Like `parallelize`, `deparallelize` is deprecated and will be removed in v5 of Transformers.", + FutureWarning, + ) + self.model_parallel = False + self.device_map = None + self.first_device = "cpu" + self.last_device = "cpu" + self.wte = self.wte.to("cpu") + for index in range(len(self.h)): + self.h[index] = self.h[index].to("cpu") + self.ln_f = self.ln_f.to("cpu") + torch.cuda.empty_cache() + + def get_input_embeddings(self): + return self.wte + + def set_input_embeddings(self, new_embeddings): + self.wte = new_embeddings + + @add_start_docstrings_to_model_forward(GPTJ_INPUTS_DOCSTRING.format("batch_size, sequence_length")) + @add_code_sample_docstrings( + checkpoint=_CHECKPOINT_FOR_DOC, + output_type=BaseModelOutputWithPast, + config_class=_CONFIG_FOR_DOC, + real_checkpoint=_REAL_CHECKPOINT_FOR_DOC, + ) + def forward( + self, + input_ids: Optional[torch.LongTensor] = None, + past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None, + attention_mask: Optional[torch.FloatTensor] = None, + token_type_ids: Optional[torch.LongTensor] = None, + position_ids: Optional[torch.LongTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, BaseModelOutputWithPast]: + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + use_cache = use_cache if use_cache is not None else self.config.use_cache + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if input_ids is not None and inputs_embeds is not None: + raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") + elif input_ids is not None: + self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) + input_shape = input_ids.size() + input_ids = input_ids.view(-1, input_shape[-1]) + batch_size = input_ids.shape[0] + elif inputs_embeds is not None: + input_shape = inputs_embeds.size()[:-1] + batch_size = inputs_embeds.shape[0] + else: + raise ValueError("You have to specify either input_ids or inputs_embeds") + + device = input_ids.device if input_ids is not None else inputs_embeds.device + + if token_type_ids is not None: + token_type_ids = token_type_ids.view(-1, input_shape[-1]) + + if past_key_values is None: + 
past_length = 0 + past_key_values = tuple([None] * len(self.h)) + else: + past_length = past_key_values[0][0].size(-2) + + if position_ids is None: + position_ids = torch.arange(past_length, input_shape[-1] + past_length, dtype=torch.long, device=device) + position_ids = position_ids.unsqueeze(0) + + if not self._use_flash_attention_2: + # Attention mask. + if attention_mask is not None: + if batch_size <= 0: + raise ValueError("batch_size has to be defined and > 0") + attention_mask = attention_mask.view(batch_size, -1) + # We create a 3D attention mask from a 2D tensor mask. + # Sizes are [batch_size, 1, 1, to_seq_length] + # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length] + # this attention mask is more simple than the triangular masking of causal attention + # used in OpenAI GPT, we just need to prepare the broadcast dimension here. + attention_mask = attention_mask[:, None, None, :] + + # Since attention_mask is 1.0 for positions we want to attend and 0.0 for + # masked positions, this operation will create a tensor which is 0.0 for + # positions we want to attend and the dtype's smallest value for masked positions. + # Since we are adding it to the raw scores before the softmax, this is + # effectively the same as removing these entirely. + attention_mask = attention_mask.to(dtype=self.dtype) # fp16 compatibility + attention_mask = (1.0 - attention_mask) * torch.finfo(self.dtype).min + + # Prepare head mask if needed + # 1.0 in head_mask indicate we keep the head + # attention_probs has shape bsz x num_attention_heads x N x N + # head_mask has shape n_layer x batch x num_attention_heads x N x N + head_mask = self.get_head_mask(head_mask, self.config.n_layer) + + if inputs_embeds is None: + inputs_embeds = self.wte(input_ids) + + hidden_states = inputs_embeds + + if token_type_ids is not None: + token_type_embeds = self.wte(token_type_ids) + hidden_states = hidden_states + token_type_embeds + + hidden_states = self.drop(hidden_states) + + output_shape = (-1,) + input_shape[1:] + (hidden_states.size(-1),) + + if self.gradient_checkpointing and self.training: + if use_cache: + logger.warning_once( + "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." 
+ ) + use_cache = False + + presents = () if use_cache else None + all_self_attentions = () if output_attentions else None + all_hidden_states = () if output_hidden_states else None + for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)): + # Model parallel + if self.model_parallel: + torch.cuda.set_device(hidden_states.device) + # Ensure layer_past is on same device as hidden_states (might not be correct) + if layer_past is not None: + layer_past = tuple(past_state.to(hidden_states.device) for past_state in layer_past) + # Ensure that attention_mask is always on the same device as hidden_states + if attention_mask is not None: + attention_mask = attention_mask.to(hidden_states.device) + if isinstance(head_mask, torch.Tensor): + head_mask = head_mask.to(hidden_states.device) + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + if self.gradient_checkpointing and self.training: + outputs = self._gradient_checkpointing_func( + block.__call__, + hidden_states, + None, + attention_mask, + position_ids, + head_mask[i], + use_cache, + output_attentions, + ) + else: + outputs = block( + hidden_states=hidden_states, + layer_past=layer_past, + attention_mask=attention_mask, + position_ids=position_ids, + head_mask=head_mask[i], + use_cache=use_cache, + output_attentions=output_attentions, + ) + + hidden_states = outputs[0] + if use_cache is True: + presents = presents + (outputs[1],) + + if output_attentions: + all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],) + + # Model Parallel: If it's the last layer for that device, put things on the next device + if self.model_parallel: + for k, v in self.device_map.items(): + if i == v[-1] and "cuda:" + str(k) != self.last_device: + hidden_states = hidden_states.to("cuda:" + str(k + 1)) + + hidden_states = self.ln_f(hidden_states) + + hidden_states = hidden_states.view(output_shape) + # Add last hidden state + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + if not return_dict: + return tuple(v for v in [hidden_states, presents, all_hidden_states, all_self_attentions] if v is not None) + + return BaseModelOutputWithPast( + last_hidden_state=hidden_states, + past_key_values=presents, + hidden_states=all_hidden_states, + attentions=all_self_attentions, + ) + + +@add_start_docstrings( + """ + The GPT-J Model transformer with a language modeling head on top. + """, + GPTJ_START_DOCSTRING, +) +class GPTJForCausalLM(GPTJPreTrainedModel): + _tied_weights_keys = ["lm_head.weight"] + + def __init__(self, config): + super().__init__(config) + self.transformer = GPTJModel(config) + self.lm_head = nn.Linear(config.n_embd, config.vocab_size) + + # Model parallel + self.model_parallel = False + self.device_map = None + + # Initialize weights and apply final processing + self.post_init() + + @add_start_docstrings(PARALLELIZE_DOCSTRING) + def parallelize(self, device_map=None): + warnings.warn( + "`GPTJForCausalLM.parallelize` is deprecated and will be removed in v5 of Transformers, you should load" + " your model with `device_map='balanced'` in the call to `from_pretrained`. 
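# Illustrative sketch (not part of the file above): the two `device_map` formats involved
# here are easy to confuse. `parallelize` (and the loop over `self.device_map` above)
# expects a mapping of device index -> list of block indices, while the recommended
# `from_pretrained(..., device_map=...)` path expects module name -> device. Both dicts
# below are hypothetical examples for a 4-block model split across 2 GPUs.
parallelize_style_device_map = {0: [0, 1], 1: [2, 3]}
from_pretrained_style_device_map = {
    "transformer.h.0": 0, "transformer.h.1": 0,   # partial illustration; remaining
    "transformer.h.2": 1, "transformer.h.3": 1,   # modules would be mapped the same way
}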
You can also provide your own" + " `device_map` but it needs to be a dictionary module_name to device, so for instance {'transformer.h.0':" + " 0, 'transformer.h.1': 1, ...}", + FutureWarning, + ) + self.device_map = ( + get_device_map(len(self.transformer.h), range(torch.cuda.device_count())) + if device_map is None + else device_map + ) + assert_device_map(self.device_map, len(self.transformer.h)) + self.transformer.parallelize(self.device_map) + self.lm_head = self.lm_head.to(self.transformer.first_device) + self.model_parallel = True + + @add_start_docstrings(DEPARALLELIZE_DOCSTRING) + def deparallelize(self): + warnings.warn( + "Like `parallelize`, `deparallelize` is deprecated and will be removed in v5 of Transformers.", + FutureWarning, + ) + self.transformer.deparallelize() + self.transformer = self.transformer.to("cpu") + self.lm_head = self.lm_head.to("cpu") + self.model_parallel = False + torch.cuda.empty_cache() + + def get_output_embeddings(self): + return self.lm_head + + def set_output_embeddings(self, new_embeddings): + self.lm_head = new_embeddings + + def prepare_inputs_for_generation(self, input_ids, past_key_values=None, inputs_embeds=None, **kwargs): + token_type_ids = kwargs.get("token_type_ids", None) + # Omit tokens covered by past_key_values + if past_key_values: + past_length = past_key_values[0][0].shape[2] + + # Some generation methods already pass only the last input ID + if input_ids.shape[1] > past_length: + remove_prefix_length = past_length + else: + # Default to old behavior: keep only final ID + remove_prefix_length = input_ids.shape[1] - 1 + + input_ids = input_ids[:, remove_prefix_length:] + if token_type_ids is not None: + token_type_ids = token_type_ids[:, -input_ids.shape[1] :] + + attention_mask = kwargs.get("attention_mask", None) + position_ids = kwargs.get("position_ids", None) + + if attention_mask is not None and position_ids is None: + # create position_ids on the fly for batch generation + position_ids = attention_mask.long().cumsum(-1) - 1 + position_ids.masked_fill_(attention_mask == 0, 1) + if past_key_values: + position_ids = position_ids[:, -input_ids.shape[1] :] + + # if `inputs_embeds` are passed, we only want to use them in the 1st generation step + if inputs_embeds is not None and past_key_values is None: + model_inputs = {"inputs_embeds": inputs_embeds} + else: + model_inputs = {"input_ids": input_ids} + + model_inputs.update( + { + "past_key_values": past_key_values, + "use_cache": kwargs.get("use_cache"), + "position_ids": position_ids, + "attention_mask": attention_mask, + "token_type_ids": token_type_ids, + } + ) + + return model_inputs + + @add_start_docstrings_to_model_forward(GPTJ_INPUTS_DOCSTRING.format("batch_size, sequence_length")) + @add_code_sample_docstrings( + checkpoint=_CHECKPOINT_FOR_DOC, + output_type=CausalLMOutputWithPast, + config_class=_CONFIG_FOR_DOC, + real_checkpoint=_REAL_CHECKPOINT_FOR_DOC, + ) + def forward( + self, + input_ids: Optional[torch.LongTensor] = None, + past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None, + attention_mask: Optional[torch.FloatTensor] = None, + token_type_ids: Optional[torch.LongTensor] = None, + position_ids: Optional[torch.LongTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> 
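# Illustrative sketch (not part of the file above): how `prepare_inputs_for_generation`
# above derives `position_ids` from a left-padded `attention_mask` during batched
# generation, shown on a dummy mask.
import torch

attention_mask = torch.tensor([[0, 0, 1, 1, 1]])     # two padding tokens on the left
position_ids = attention_mask.long().cumsum(-1) - 1  # -> [[-1, -1, 0, 1, 2]]
position_ids.masked_fill_(attention_mask == 0, 1)    # padded slots get a dummy position
# position_ids is now tensor([[1, 1, 0, 1, 2]]); the real tokens count from 0.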
Union[Tuple, CausalLMOutputWithPast]: + r""" + labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set + `labels = input_ids` Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100` + are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]` + """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + transformer_outputs = self.transformer( + input_ids, + past_key_values=past_key_values, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + hidden_states = transformer_outputs[0] + + # Set device for model parallelism + if self.model_parallel: + torch.cuda.set_device(self.transformer.first_device) + hidden_states = hidden_states.to(self.lm_head.weight.device) + + # make sure sampling in fp16 works correctly and + # compute loss in fp32 to match with mesh-tf version + # https://github.com/EleutherAI/gpt-neo/blob/89ce74164da2fb16179106f54e2269b5da8db333/models/gpt2/gpt2.py#L179 + lm_logits = self.lm_head(hidden_states).to(torch.float32) + + loss = None + if labels is not None: + # move labels to correct device to enable model parallelism + labels = labels.to(lm_logits.device) + # Shift so that tokens < n predict n + shift_logits = lm_logits[..., :-1, :].contiguous() + shift_labels = labels[..., 1:].contiguous() + # Flatten the tokens + loss_fct = CrossEntropyLoss() + loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1)) + + loss = loss.to(hidden_states.dtype) + + if not return_dict: + output = (lm_logits,) + transformer_outputs[1:] + return ((loss,) + output) if loss is not None else output + + return CausalLMOutputWithPast( + loss=loss, + logits=lm_logits, + past_key_values=transformer_outputs.past_key_values, + hidden_states=transformer_outputs.hidden_states, + attentions=transformer_outputs.attentions, + ) + + @staticmethod + def _reorder_cache( + past_key_values: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor + ) -> Tuple[Tuple[torch.Tensor]]: + """ + This function is used to re-order the `past_key_values` cache if [`~PretrainedModel.beam_search`] or + [`~PretrainedModel.beam_sample`] is called. This is required to match `past_key_values` with the correct + beam_idx at every generation step. + """ + return tuple( + tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past) + for layer_past in past_key_values + ) + + +@add_start_docstrings( + """ + The GPT-J Model transformer with a sequence classification head on top (linear layer). + + [`GPTJForSequenceClassification`] uses the last token in order to do the classification, as other causal models + (e.g. GPT, GPT-2, GPT-Neo) do. + + Since it does classification on the last token, it requires to know the position of the last token. If a + `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If + no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the + padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in + each row of the batch). 
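# Illustrative sketch (not part of the file above): the label shift performed inside
# `GPTJForCausalLM.forward`, reproduced on dummy tensors. Logits at position t are scored
# against the token at position t + 1, which is why passing `labels = input_ids` is enough.
import torch
from torch.nn import CrossEntropyLoss

vocab_size, batch_size, seq_len = 10, 1, 5
lm_logits = torch.randn(batch_size, seq_len, vocab_size)
labels = torch.randint(0, vocab_size, (batch_size, seq_len))

shift_logits = lm_logits[..., :-1, :].contiguous()  # drop the last position
shift_labels = labels[..., 1:].contiguous()         # drop the first token
loss = CrossEntropyLoss()(shift_logits.view(-1, vocab_size), shift_labels.view(-1))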
+ """, + GPTJ_START_DOCSTRING, +) +class GPTJForSequenceClassification(GPTJPreTrainedModel): + def __init__(self, config): + super().__init__(config) + self.num_labels = config.num_labels + self.transformer = GPTJModel(config) + self.score = nn.Linear(config.n_embd, self.num_labels, bias=False) + + # Model parallel + self.model_parallel = False + self.device_map = None + + # Initialize weights and apply final processing + self.post_init() + + @add_start_docstrings_to_model_forward(GPTJ_INPUTS_DOCSTRING.format("batch_size, sequence_length")) + @add_code_sample_docstrings( + checkpoint="ydshieh/tiny-random-gptj-for-sequence-classification", + output_type=SequenceClassifierOutputWithPast, + config_class=_CONFIG_FOR_DOC, + real_checkpoint=_REAL_CHECKPOINT_FOR_DOC, + ) + def forward( + self, + input_ids: Optional[torch.LongTensor] = None, + past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None, + attention_mask: Optional[torch.FloatTensor] = None, + token_type_ids: Optional[torch.LongTensor] = None, + position_ids: Optional[torch.LongTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, SequenceClassifierOutputWithPast]: + r""" + labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., + config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If + `config.num_labels > 1` a classification loss is computed (Cross-Entropy). + """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + transformer_outputs = self.transformer( + input_ids, + past_key_values=past_key_values, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + hidden_states = transformer_outputs[0] + logits = self.score(hidden_states) + + if input_ids is not None: + batch_size = input_ids.shape[0] + else: + batch_size = inputs_embeds.shape[0] + + if self.config.pad_token_id is None and batch_size != 1: + raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.") + if self.config.pad_token_id is None: + sequence_lengths = -1 + else: + if input_ids is not None: + # if no pad token found, use modulo instead of reverse indexing for ONNX compatibility + sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1 + sequence_lengths = sequence_lengths % input_ids.shape[-1] + sequence_lengths = sequence_lengths.to(logits.device) + else: + sequence_lengths = -1 + logger.warning( + f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. 
Results may be " + "unexpected if using padding tokens in conjunction with `inputs_embeds.`" + ) + + pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths] + + loss = None + if labels is not None: + labels = labels.to(pooled_logits.device) + if self.config.problem_type is None: + if self.num_labels == 1: + self.config.problem_type = "regression" + elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): + self.config.problem_type = "single_label_classification" + else: + self.config.problem_type = "multi_label_classification" + + if self.config.problem_type == "regression": + loss_fct = MSELoss() + if self.num_labels == 1: + loss = loss_fct(pooled_logits.squeeze(), labels.squeeze()) + else: + loss = loss_fct(pooled_logits, labels) + elif self.config.problem_type == "single_label_classification": + loss_fct = CrossEntropyLoss() + loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1)) + elif self.config.problem_type == "multi_label_classification": + loss_fct = BCEWithLogitsLoss() + loss = loss_fct(pooled_logits, labels) + if not return_dict: + output = (pooled_logits,) + transformer_outputs[1:] + return ((loss,) + output) if loss is not None else output + + return SequenceClassifierOutputWithPast( + loss=loss, + logits=pooled_logits, + past_key_values=transformer_outputs.past_key_values, + hidden_states=transformer_outputs.hidden_states, + attentions=transformer_outputs.attentions, + ) + + +@add_start_docstrings( + """ + The GPT-J Model transformer with a span classification head on top for extractive question-answering tasks like + SQuAD (a linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`). + """, + GPTJ_START_DOCSTRING, +) +class GPTJForQuestionAnswering(GPTJPreTrainedModel): + def __init__(self, config): + super().__init__(config) + self.num_labels = config.num_labels + self.transformer = GPTJModel(config) + self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) + + # Model parallel + self.model_parallel = False + self.device_map = None + + # Initialize weights and apply final processing + self.post_init() + + @add_start_docstrings_to_model_forward(GPTJ_INPUTS_DOCSTRING.format("batch_size, sequence_length")) + @add_code_sample_docstrings( + checkpoint=_CHECKPOINT_FOR_DOC, + output_type=QuestionAnsweringModelOutput, + config_class=_CONFIG_FOR_DOC, + real_checkpoint=_REAL_CHECKPOINT_FOR_DOC, + ) + def forward( + self, + input_ids: Optional[torch.LongTensor] = None, + attention_mask: Optional[torch.FloatTensor] = None, + token_type_ids: Optional[torch.LongTensor] = None, + position_ids: Optional[torch.LongTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + start_positions: Optional[torch.LongTensor] = None, + end_positions: Optional[torch.LongTensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, QuestionAnsweringModelOutput]: + r""" + start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Labels for position (index) of the start of the labelled span for computing the token classification loss. + Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence + are not taken into account for computing the loss. 
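# Illustrative sketch (not part of the file above): how `GPTJForSequenceClassification`
# locates the last non-padding token in each row and pools its logits. `pad_token_id = 0`
# and the tensor values are arbitrary; a row without padding wraps around to the last
# index via the modulo, exactly as in the forward pass above.
import torch

pad_token_id = 0
input_ids = torch.tensor([[5, 7, 2, 0, 0],
                          [9, 4, 6, 3, 1]])               # second row has no padding
logits = torch.randn(2, 5, 3)                             # (batch, seq, num_labels)

sequence_lengths = torch.eq(input_ids, pad_token_id).int().argmax(-1) - 1  # -> [2, -1]
sequence_lengths = sequence_lengths % input_ids.shape[-1]                  # -> [2, 4]
pooled_logits = logits[torch.arange(2), sequence_lengths]                  # shape (2, 3)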
+ end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Labels for position (index) of the end of the labelled span for computing the token classification loss. + Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence + are not taken into account for computing the loss. + """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + outputs = self.transformer( + input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + sequence_output = outputs[0] + + logits = self.qa_outputs(sequence_output) + start_logits, end_logits = logits.split(1, dim=-1) + start_logits = start_logits.squeeze(-1).contiguous() + end_logits = end_logits.squeeze(-1).contiguous() + + total_loss = None + if start_positions is not None and end_positions is not None: + # If we are on multi-GPU, split add a dimension + if len(start_positions.size()) > 1: + start_positions = start_positions.squeeze(-1).to(start_logits.device) + if len(end_positions.size()) > 1: + end_positions = end_positions.squeeze(-1).to(end_logits.device) + # sometimes the start/end positions are outside our model inputs, we ignore these terms + ignored_index = start_logits.size(1) + start_positions = start_positions.clamp(0, ignored_index) + end_positions = end_positions.clamp(0, ignored_index) + + loss_fct = CrossEntropyLoss(ignore_index=ignored_index) + start_loss = loss_fct(start_logits, start_positions) + end_loss = loss_fct(end_logits, end_positions) + total_loss = (start_loss + end_loss) / 2 + + if not return_dict: + output = (start_logits, end_logits) + outputs[2:] + return ((total_loss,) + output) if total_loss is not None else output + + return QuestionAnsweringModelOutput( + loss=total_loss, + start_logits=start_logits, + end_logits=end_logits, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) diff --git a/venv/lib/python3.10/site-packages/transformers/models/gptj/modeling_tf_gptj.py b/venv/lib/python3.10/site-packages/transformers/models/gptj/modeling_tf_gptj.py new file mode 100644 index 0000000000000000000000000000000000000000..5c315b5b66f04929a48f809de5ec0994d40561a7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/gptj/modeling_tf_gptj.py @@ -0,0 +1,1099 @@ +# coding=utf-8 +# Copyright 2022 The EleutherAI and HuggingFace Teams. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
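# Illustrative sketch (not part of the modeling files above): the span loss computed by
# `GPTJForQuestionAnswering.forward`, reproduced on dummy tensors. Positions outside the
# sequence are clamped to `ignored_index` and then skipped by the cross-entropy.
import torch
from torch.nn import CrossEntropyLoss

seq_len = 6
start_logits = torch.randn(2, seq_len)
end_logits = torch.randn(2, seq_len)
start_positions = torch.tensor([1, 99])   # 99 lies outside the sequence
end_positions = torch.tensor([3, 99])

ignored_index = seq_len
start_positions = start_positions.clamp(0, ignored_index)
end_positions = end_positions.clamp(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
total_loss = (loss_fct(start_logits, start_positions) + loss_fct(end_logits, end_positions)) / 2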
+""" TF 2.0 GPT-J model.""" + +from __future__ import annotations + +from typing import Optional, Tuple, Union + +import numpy as np +import tensorflow as tf + +from ...activations_tf import get_tf_activation +from ...file_utils import ( + add_code_sample_docstrings, + add_start_docstrings, + add_start_docstrings_to_model_forward, +) +from ...modeling_tf_outputs import ( + TFBaseModelOutputWithPast, + TFCausalLMOutputWithPast, + TFQuestionAnsweringModelOutput, + TFSequenceClassifierOutputWithPast, +) +from ...modeling_tf_utils import ( + TFCausalLanguageModelingLoss, + TFModelInputType, + TFPreTrainedModel, + TFQuestionAnsweringLoss, + TFSequenceClassificationLoss, + TFSharedEmbeddings, + get_initializer, + keras, + keras_serializable, + unpack_inputs, +) +from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax +from ...utils import logging +from .configuration_gptj import GPTJConfig + + +logger = logging.get_logger(__name__) + +_CHECKPOINT_FOR_DOC = "EleutherAI/gpt-j-6B" +_CONFIG_FOR_DOC = "GPTJConfig" + + +def create_sinusoidal_positions(num_pos: int, dim: int) -> tf.Tensor: + inv_freq = tf.cast(1.0 / (10000 ** (tf.range(0, dim, 2) / dim)), tf.float32) + sinusoid_inp = tf.cast(tf.einsum("i , j -> i j", tf.range(num_pos, dtype=tf.float32), inv_freq), tf.float32) + sin, cos = tf.sin(sinusoid_inp), tf.cos(sinusoid_inp) + out = tf.concat((sin, cos), axis=1) + return out + + +def rotate_every_two(x: tf.Tensor) -> tf.Tensor: + rotate_half_tensor = tf.stack((-x[:, :, :, 1::2], x[:, :, :, ::2]), axis=-1) + new_shape = shape_list(rotate_half_tensor)[:-2] + [tf.math.reduce_prod(shape_list(rotate_half_tensor)[-2:])] + rotate_half_tensor = tf.reshape(rotate_half_tensor, new_shape) + return rotate_half_tensor + + +def apply_rotary_pos_emb(tensor: tf.Tensor, sincos: tf.Tensor) -> tf.Tensor: + sin_pos, cos_pos = sincos + sin_pos = tf.repeat(sin_pos[:, :, None, :], 2, 3) + cos_pos = tf.repeat(cos_pos[:, :, None, :], 2, 3) + return (tensor * cos_pos) + (rotate_every_two(tensor) * sin_pos) + + +class TFGPTJAttention(keras.layers.Layer): + def __init__(self, config: GPTJConfig, **kwargs): + super().__init__(**kwargs) + + self.embed_dim = config.hidden_size + self.num_attention_heads = config.num_attention_heads + self.head_dim = self.embed_dim // self.num_attention_heads + if self.head_dim * self.num_attention_heads != self.embed_dim: + raise ValueError( + f"embed_dim must be divisible by num_attention_heads (got `embed_dim`: {self.embed_dim} and" + f" `num_attention_heads`: {self.num_attention_heads})." 
+ ) + self.scale_attn = self.head_dim**0.5 + self.rotary_dim = config.rotary_dim + + self.attn_dropout = keras.layers.Dropout(config.attn_pdrop) + self.resid_dropout = keras.layers.Dropout(config.resid_pdrop) + + self.q_proj = keras.layers.Dense( + self.embed_dim, + use_bias=False, + kernel_initializer=get_initializer(config.initializer_range), + name="q_proj", + ) + self.k_proj = keras.layers.Dense( + self.embed_dim, + use_bias=False, + kernel_initializer=get_initializer(config.initializer_range), + name="k_proj", + ) + self.v_proj = keras.layers.Dense( + self.embed_dim, + use_bias=False, + kernel_initializer=get_initializer(config.initializer_range), + name="v_proj", + ) + self.out_proj = keras.layers.Dense( + self.embed_dim, + use_bias=False, + kernel_initializer=get_initializer(config.initializer_range), + name="out_proj", + ) + + self.max_positions = config.max_position_embeddings + self.lower_triangle_mask = tf.reshape( + tf.cast(tf.experimental.numpy.tril(tf.ones((self.max_positions, self.max_positions))), tf.int8), + (1, 1, self.max_positions, self.max_positions), + ) + pos_embd_dim = self.rotary_dim or self.embed_dim + self.embed_positions = create_sinusoidal_positions(self.max_positions, pos_embd_dim) + + def get_causal_mask(self, key_length, query_length) -> tf.Tensor: + return tf.cast(self.lower_triangle_mask[:, :, key_length - query_length : key_length, :key_length], tf.bool) + + @staticmethod + def get_masked_bias(dtype: tf.DType) -> tf.Tensor: + return tf.cast(tf.constant(-1e9), dtype) + + def _split_heads(self, hidden_states: tf.Tensor, rotary: bool) -> tf.Tensor: + """ + Splits hidden dim into attn_head_size and num_attention_heads + """ + new_shape = shape_list(hidden_states)[:-1] + [self.num_attention_heads, self.head_dim] + hidden_states = tf.reshape(hidden_states, new_shape) + if rotary: + return hidden_states + if len(shape_list(hidden_states)) == 4: + return tf.transpose(hidden_states, (0, 2, 1, 3)) # (batch, head, seq_length, head_features) + if len(shape_list(hidden_states)) == 5: + return tf.transpose(hidden_states, (0, 1, 3, 2, 4)) # (batch, blocks, head, block_length, head_features) + raise ValueError(f"Input tensor rank should be one of [4, 5], but is: {len(shape_list(hidden_states))}") + + def _merge_heads(self, hidden_states: tf.Tensor) -> tf.Tensor: + """ + Merges attn_head_size dim and num_attn_heads dim into hidden dim + """ + if len(shape_list(hidden_states)) == 4: + hidden_states = tf.transpose(hidden_states, (0, 2, 1, 3)) + elif len(shape_list(hidden_states)) == 5: + hidden_states = tf.transpose(hidden_states, (0, 1, 3, 2, 4)) + else: + raise ValueError(f"Input tensor rank should be one of [4, 5], but is: {len(shape_list(hidden_states))}") + new_shape = shape_list(hidden_states)[:-2] + [self.num_attention_heads * self.head_dim] + return tf.reshape(hidden_states, new_shape) + + def _attn( + self, + query: tf.Tensor, + key: tf.Tensor, + value: tf.Tensor, + attention_mask: tf.Tensor | None = None, + head_mask: tf.Tensor | None = None, + ) -> Tuple[tf.Tensor, tf.Tensor]: + # compute causal mask from causal mask buffer + query_length, key_length = shape_list(query)[-2], shape_list(key)[-2] + causal_mask = self.get_causal_mask(key_length, query_length) + + # Keep the attention weights computation in fp32 to avoid overflow issues + query = tf.cast(query, tf.float32) + key = tf.cast(key, tf.float32) + + attn_weights = tf.matmul(query, key, transpose_b=True) + attn_weights = tf.where(causal_mask, attn_weights, self.get_masked_bias(attn_weights.dtype)) + + 
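# (Descriptive note, not part of the original file: at this point `attn_weights` has shape
# (batch, num_heads, query_length, key_length), and entries where a query would attend to a
# future key have just been replaced by the large negative bias from `get_masked_bias`
# (-1e9), so they vanish after the softmax. The division by `scale_attn` = sqrt(head_dim)
# below is the standard scaled dot-product normalization; applying it after the masking is
# harmless because the masked entries remain hugely negative.)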
attn_weights = attn_weights / self.scale_attn + + if attention_mask is not None: + # Apply the attention mask + attn_weights = attn_weights + attention_mask + + attn_weights = stable_softmax(attn_weights, axis=-1) + attn_weights = tf.cast(attn_weights, value.dtype) + attn_weights = self.attn_dropout(attn_weights) + + # Mask heads if we want to + if head_mask is not None: + attn_weights = attn_weights * head_mask + + attn_output = tf.matmul(attn_weights, value) + + return attn_output, attn_weights + + def call( + self, + hidden_states: tf.Tensor, + layer_past: Optional[Tuple[tf.Tensor, tf.Tensor]] = None, + attention_mask: tf.Tensor | None = None, + position_ids: tf.Tensor | None = None, + head_mask: tf.Tensor | None = None, + use_cache: bool = False, + output_attentions: bool = False, + ): + query = self.q_proj(hidden_states) + key = self.k_proj(hidden_states) + value = self.v_proj(hidden_states) + + query = self._split_heads(query, True) + key = self._split_heads(key, True) + value = self._split_heads(value, False) + + sincos = tf.cast(tf.gather(self.embed_positions, position_ids, axis=0), hidden_states.dtype) + sincos = tf.split(sincos, 2, axis=-1) + if self.rotary_dim is not None: + k_rot = key[:, :, :, : self.rotary_dim] + k_pass = key[:, :, :, self.rotary_dim :] + + q_rot = query[:, :, :, : self.rotary_dim] + q_pass = query[:, :, :, self.rotary_dim :] + + k_rot = apply_rotary_pos_emb(k_rot, sincos) + q_rot = apply_rotary_pos_emb(q_rot, sincos) + + key = tf.concat((k_rot, k_pass), axis=-1) + query = tf.concat((q_rot, q_pass), axis=-1) + else: + key = apply_rotary_pos_emb(key, sincos) + query = apply_rotary_pos_emb(query, sincos) + + key = tf.transpose(key, (0, 2, 1, 3)) + query = tf.transpose(query, (0, 2, 1, 3)) + + if layer_past is not None: + past_key = layer_past[0] + past_value = layer_past[1] + key = tf.concat((past_key, key), axis=-2) + value = tf.concat((past_value, value), axis=-2) + + if use_cache is True: + present = (key, value) + else: + present = None + + # compute self-attention: V x Softmax(QK^T) + attn_output, attn_weights = self._attn(query, key, value, attention_mask, head_mask) + + attn_output = self._merge_heads(attn_output) + attn_output = self.out_proj(attn_output) + attn_output = self.resid_dropout(attn_output) + + outputs = (attn_output, present) + if output_attentions: + outputs += (attn_weights,) + + return outputs # a, present, (attentions) + + def build(self, input_shape=None): + if self.built: + return + self.built = True + if getattr(self, "q_proj", None) is not None: + with tf.name_scope(self.q_proj.name): + self.q_proj.build([None, None, self.embed_dim]) + if getattr(self, "k_proj", None) is not None: + with tf.name_scope(self.k_proj.name): + self.k_proj.build([None, None, self.embed_dim]) + if getattr(self, "v_proj", None) is not None: + with tf.name_scope(self.v_proj.name): + self.v_proj.build([None, None, self.embed_dim]) + if getattr(self, "out_proj", None) is not None: + with tf.name_scope(self.out_proj.name): + self.out_proj.build([None, None, self.embed_dim]) + + +class TFGPTJMLP(keras.layers.Layer): + def __init__(self, intermediate_size: int, config: GPTJConfig, **kwargs): + super().__init__(**kwargs) + embed_dim = config.n_embd + + self.fc_in = keras.layers.Dense( + intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name="fc_in" + ) + self.fc_out = keras.layers.Dense( + embed_dim, kernel_initializer=get_initializer(config.initializer_range), name="fc_out" + ) + + self.act = 
get_tf_activation(config.activation_function) + self.dropout = keras.layers.Dropout(config.embd_pdrop) + self.embed_dim = config.n_embd + self.intermediate_size = intermediate_size + + def call(self, hidden_states: tf.Tensor) -> tf.Tensor: + hidden_states = self.fc_in(hidden_states) + hidden_states = self.act(hidden_states) + hidden_states = self.fc_out(hidden_states) + hidden_states = self.dropout(hidden_states) + return hidden_states + + def build(self, input_shape=None): + if self.built: + return + self.built = True + if getattr(self, "fc_in", None) is not None: + with tf.name_scope(self.fc_in.name): + self.fc_in.build([None, None, self.embed_dim]) + if getattr(self, "fc_out", None) is not None: + with tf.name_scope(self.fc_out.name): + self.fc_out.build([None, None, self.intermediate_size]) + + +class TFGPTJBlock(keras.layers.Layer): + def __init__(self, config: GPTJConfig, **kwargs): + super().__init__(**kwargs) + inner_dim = config.n_inner if config.n_inner is not None else 4 * config.n_embd + self.ln_1 = keras.layers.LayerNormalization(epsilon=config.layer_norm_epsilon, name="ln_1") + self.attn = TFGPTJAttention(config, name="attn") + self.mlp = TFGPTJMLP(inner_dim, config, name="mlp") + self.config = config + + def call( + self, + hidden_states: tf.Tensor, + layer_past: tf.Tensor | None = None, + attention_mask: tf.Tensor | None = None, + position_ids: tf.Tensor | None = None, + head_mask: tf.Tensor | None = None, + use_cache: bool = False, + output_attentions: bool = False, + ): + residual = hidden_states + hidden_states = self.ln_1(hidden_states) + attn_outputs = self.attn( + hidden_states=hidden_states, + layer_past=layer_past, + attention_mask=attention_mask, + position_ids=position_ids, + head_mask=head_mask, + use_cache=use_cache, + output_attentions=output_attentions, + ) # attn_outputs: attn_output, present, (attentions) + attn_output = attn_outputs[0] + outputs = attn_outputs[1:] + + feed_forward_hidden_states = self.mlp(hidden_states) + hidden_states = attn_output + feed_forward_hidden_states + residual + + if use_cache: + outputs = (hidden_states,) + outputs + else: + outputs = (hidden_states,) + outputs[1:] + return outputs # hidden_states, present, (attentions) + + def build(self, input_shape=None): + if self.built: + return + self.built = True + if getattr(self, "ln_1", None) is not None: + with tf.name_scope(self.ln_1.name): + self.ln_1.build([None, None, self.config.n_embd]) + if getattr(self, "attn", None) is not None: + with tf.name_scope(self.attn.name): + self.attn.build(None) + if getattr(self, "mlp", None) is not None: + with tf.name_scope(self.mlp.name): + self.mlp.build(None) + + +@keras_serializable +class TFGPTJMainLayer(keras.layers.Layer): + config_class = GPTJConfig + + def __init__(self, config: GPTJConfig, *inputs, **kwargs): + super().__init__(*inputs, **kwargs) + + self.config = config + self.output_attentions = config.output_attentions + self.output_hidden_states = config.output_hidden_states + self.use_cache = config.use_cache + self.return_dict = config.use_return_dict + + self.num_hidden_layers = config.n_layer + self.n_embd = config.n_embd + self.n_positions = config.n_positions + self.initializer_range = config.initializer_range + + self.wte = TFSharedEmbeddings( + config.vocab_size, config.hidden_size, initializer_range=config.initializer_range, name="wte" + ) + self.drop = keras.layers.Dropout(config.embd_pdrop) + self.h = [TFGPTJBlock(config, name=f"h_._{i}") for i in range(config.n_layer)] + self.ln_f = 
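# (Descriptive note, not part of the original file: `TFGPTJBlock` above uses GPT-J's
# "parallel" residual layout. A single LayerNorm feeds both the attention and the MLP, and
# their outputs are added to the residual in one step:
#     h_out = h_in + Attention(LayerNorm(h_in)) + MLP(LayerNorm(h_in))
# This differs from GPT-2-style blocks, where attention and MLP are applied sequentially,
# each behind its own LayerNorm.)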
keras.layers.LayerNormalization(epsilon=config.layer_norm_epsilon, name="ln_f") + self.embed_dim = config.n_embd + + def get_input_embeddings(self): + return self.wte + + def set_input_embeddings(self, value: tf.Tensor): + self.wte.weight = value + self.wte.vocab_size = shape_list(value)[0] + + def _prune_heads(self, heads_to_prune): + """ + Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} + """ + raise NotImplementedError + + @unpack_inputs + def call( + self, + input_ids=None, + past_key_values=None, + attention_mask=None, + token_type_ids=None, + position_ids=None, + head_mask=None, + inputs_embeds=None, + use_cache=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + training=False, + ) -> Union[TFBaseModelOutputWithPast, Tuple[tf.Tensor]]: + if input_ids is not None and inputs_embeds is not None: + raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") + elif input_ids is not None: + input_shape = shape_list(input_ids) + input_ids = tf.reshape(input_ids, [-1, input_shape[-1]]) + elif inputs_embeds is not None: + input_shape = shape_list(inputs_embeds)[:-1] + else: + raise ValueError("You have to specify either input_ids or inputs_embeds") + + if past_key_values is None: + past_length = 0 + past_key_values = [None] * len(self.h) + else: + past_length = shape_list(past_key_values[0][0])[-2] + + if position_ids is None: + position_ids = tf.expand_dims(tf.range(past_length, input_shape[-1] + past_length), axis=0) + + if attention_mask is not None: + # We create a 3D attention mask from a 2D tensor mask. + # Sizes are [batch_size, 1, 1, to_seq_length] + # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length] + # this attention mask is more simple than the triangular masking of causal attention + # used in OpenAI GPT, we just need to prepare the broadcast dimension here. + attention_mask_shape = shape_list(attention_mask) + attention_mask = tf.reshape(attention_mask, (attention_mask_shape[0], 1, 1, attention_mask_shape[1])) + + # Since attention_mask is 1.0 for positions we want to attend and 0.0 for + # masked positions, this operation will create a tensor which is 0.0 for + # positions we want to attend and -10000.0 for masked positions. + # Since we are adding it to the raw scores before the softmax, this is + # effectively the same as removing these entirely. 
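# (Illustrative note, not part of the original file: for a padding mask of [[1, 1, 0]] the
# lines below compute (1 - mask) * -10000.0 = [[0.0, 0.0, -10000.0]]. Added to the raw
# attention scores before the softmax, the -10000.0 entries push the corresponding
# probabilities to ~0, so no explicit boolean masking is needed for padding positions.)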
+ one_cst = tf.constant(1.0) + attention_mask = tf.cast(attention_mask, dtype=one_cst.dtype) + attention_mask = tf.multiply(tf.subtract(one_cst, attention_mask), tf.constant(-10000.0)) + + # Prepare head mask if needed + # 1.0 in head_mask indicate we keep the head + # attention_probs has shape bsz x n_heads x N x N + # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] + # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] + if head_mask is not None: + raise NotImplementedError + else: + head_mask = [None] * self.num_hidden_layers + # head_mask = tf.constant([0] * self.num_hidden_layers) + + position_ids = tf.reshape(position_ids, [-1, shape_list(position_ids)[-1]]) + + if inputs_embeds is None: + check_embeddings_within_bounds(input_ids, self.wte.vocab_size) + inputs_embeds = self.wte(input_ids, mode="embedding") + + if token_type_ids is not None: + token_type_ids = tf.reshape(token_type_ids, [-1, shape_list(token_type_ids)[-1]]) + token_type_embeds = self.wte(token_type_ids, mode="embedding") + else: + token_type_embeds = tf.constant(0.0) + + token_type_embeds = tf.cast(token_type_embeds, dtype=inputs_embeds.dtype) + hidden_states = inputs_embeds + token_type_embeds + hidden_states = self.drop(hidden_states, training=training) + + output_shape = input_shape + [shape_list(hidden_states)[-1]] + + presents = () if use_cache else None + all_attentions = () if output_attentions else None + all_hidden_states = () if output_hidden_states else None + for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)): + if output_hidden_states: + all_hidden_states = all_hidden_states + (tf.reshape(hidden_states, output_shape),) + + outputs = block( + hidden_states=hidden_states, + layer_past=layer_past, + attention_mask=attention_mask, + position_ids=position_ids, + head_mask=head_mask[i], + use_cache=use_cache, + output_attentions=output_attentions, + training=training, + ) + + hidden_states = outputs[0] + if use_cache: + presents = presents + (outputs[1],) + + if output_attentions: + all_attentions = all_attentions + (outputs[2 if use_cache else 1],) + + hidden_states = self.ln_f(hidden_states) + + hidden_states = tf.reshape(hidden_states, output_shape) + # Add last hidden state + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + if output_attentions: + # let the number of heads free (-1) so we can extract attention even after head pruning + attention_output_shape = input_shape[:-1] + [-1] + shape_list(all_attentions[0])[-2:] + all_attentions = tuple(tf.reshape(t, attention_output_shape) for t in all_attentions) + + if not return_dict: + return tuple(v for v in [hidden_states, presents, all_hidden_states, all_attentions] if v is not None) + + return TFBaseModelOutputWithPast( + last_hidden_state=hidden_states, + past_key_values=presents, + hidden_states=all_hidden_states, + attentions=all_attentions, + ) + + def build(self, input_shape=None): + if self.built: + return + self.built = True + if getattr(self, "wte", None) is not None: + with tf.name_scope(self.wte.name): + self.wte.build(None) + if getattr(self, "ln_f", None) is not None: + with tf.name_scope(self.ln_f.name): + self.ln_f.build([None, None, self.embed_dim]) + if getattr(self, "h", None) is not None: + for layer in self.h: + with tf.name_scope(layer.name): + layer.build(None) + + +class TFGPTJPreTrainedModel(TFPreTrainedModel): + """ + An abstract class to handle weights initialization and a simple interface for 
downloading and loading pretrained + models. + """ + + config_class = GPTJConfig + base_model_prefix = "transformer" + # names with a '.' represents the authorized unexpected/missing layers when a TF model is loaded from a PT model + _keys_to_ignore_on_load_unexpected = [r"h.\d+.attn.bias"] + + +GPTJ_START_DOCSTRING = r""" + + This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the + library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads + etc.) + + This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it + as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and + behavior. + + + + TensorFlow models and layers in `transformers` accept two formats as input: + + - having all inputs as keyword arguments (like PyTorch models), or + - having all inputs as a list, tuple or dict in the first positional argument. + + The reason the second format is supported is that Keras methods prefer this format when passing inputs to models + and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just + pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second + format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with + the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first + positional argument: + + - a single Tensor with `input_ids` only and nothing else: `model(input_ids)` + - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: + `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])` + - a dictionary with one or several input Tensors associated to the input names given in the docstring: + `model({"input_ids": input_ids, "token_type_ids": token_type_ids})` + + Note that when creating models and layers with + [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry + about any of this, as you can just pass inputs like you would to any other Python function! + + + + Parameters: + config ([`GPTJConfig`]): Model configuration class with all the parameters of the model. + Initializing with a config file does not load the weights associated with the model, only the + configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights. +""" + +GPTJ_INPUTS_DOCSTRING = r""" + Args: + input_ids (`Numpy array` or `tf.Tensor` of shape `(batch_size, input_ids_length)`): + `input_ids_length` = `sequence_length` if `past` is `None` else `past[0].shape[-2]` (`sequence_length` of + input past key value states). Indices of input sequence tokens in the vocabulary. + + If `past` is used, only input IDs that do not have their past calculated should be passed as `input_ids`. + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and + [`PreTrainedTokenizer.encode`] for details. + + [What are input IDs?](../glossary#input-ids) + past_key_values (`List[tf.Tensor]` of length `config.n_layers`): + Contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model (see + `past` output below). Can be used to speed up sequential decoding. 
The token ids which have their past + given to this model should not be passed as input ids as they have already been computed. + attention_mask (`tf.Tensor` or `Numpy array` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + token_type_ids (`tf.Tensor` or `Numpy array` of shape `(batch_size, sequence_length)`, *optional*): + Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, + 1]`: + + - 0 corresponds to a *sentence A* token, + - 1 corresponds to a *sentence B* token. + + [What are token type IDs?](../glossary#token-type-ids) + position_ids (`tf.Tensor` or `Numpy array` of shape `(batch_size, sequence_length)`, *optional*): + Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, + config.max_position_embeddings - 1]`. + + [What are position IDs?](../glossary#position-ids) + head_mask (`Numpy array` or `tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): + Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + + inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This + is useful if you want more control over how to convert `input_ids` indices into associated vectors than the + model's internal embedding lookup matrix. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the + config will be used instead. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. This argument can be used only in eager mode, in graph mode the value in the config will be + used instead. + return_dict (`bool`, *optional*): + Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple. This argument can be used + in eager mode, in graph mode the value will always be set to True. + training (`bool`, *optional*, defaults to `False`): + Whether or not to use the model in training mode (some modules like dropout modules have different + behaviors between training and evaluation). 
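# Illustrative sketch (not part of the file above): exercising two of the input formats
# described in GPTJ_START_DOCSTRING on a tiny randomly-initialized configuration, assuming
# `transformers` with TensorFlow support is installed. The hyperparameters are arbitrary
# and chosen only to keep the example small; the docstring additionally allows a positional
# list of tensors.
import tensorflow as tf
from transformers import GPTJConfig, TFGPTJModel

tiny_config = GPTJConfig(vocab_size=100, n_positions=32, n_embd=16, n_layer=2, n_head=4, rotary_dim=4)
model = TFGPTJModel(tiny_config)

input_ids = tf.constant([[5, 6, 7, 8]])
attention_mask = tf.constant([[1, 1, 1, 1]])

outputs_from_tensor = model(input_ids)  # a single tensor with `input_ids` only
outputs_from_dict = model({"input_ids": input_ids, "attention_mask": attention_mask})
print(outputs_from_tensor.last_hidden_state.shape)  # (1, 4, 16)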
+""" + + +@add_start_docstrings( + "The bare GPT-J Model transformer outputting raw hidden-states without any specific head on top.", + GPTJ_START_DOCSTRING, +) +class TFGPTJModel(TFGPTJPreTrainedModel): + def __init__(self, config, *inputs, **kwargs): + super().__init__(config, *inputs, **kwargs) + self.transformer = TFGPTJMainLayer(config, name="transformer") + + @unpack_inputs + @add_start_docstrings_to_model_forward(GPTJ_INPUTS_DOCSTRING) + @add_code_sample_docstrings( + checkpoint=_CHECKPOINT_FOR_DOC, + output_type=TFBaseModelOutputWithPast, + config_class=_CONFIG_FOR_DOC, + ) + def call( + self, + input_ids: TFModelInputType | None = None, + past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None, + attention_mask: np.ndarray | tf.Tensor | None = None, + token_type_ids: np.ndarray | tf.Tensor | None = None, + position_ids: np.ndarray | tf.Tensor | None = None, + head_mask: np.ndarray | tf.Tensor | None = None, + inputs_embeds: np.ndarray | tf.Tensor | None = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + training: Optional[bool] = False, + ) -> Union[TFBaseModelOutputWithPast, Tuple[tf.Tensor]]: + r""" + use_cache (`bool`, *optional*, defaults to `True`): + If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see + `past`). Set to `False` during training, `True` during generation + """ + + outputs = self.transformer( + input_ids=input_ids, + past_key_values=past_key_values, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + training=training, + ) + + return outputs + + def build(self, input_shape=None): + if self.built: + return + self.built = True + if getattr(self, "transformer", None) is not None: + with tf.name_scope(self.transformer.name): + self.transformer.build(None) + + +@add_start_docstrings( + """ + The GPT-J Model transformer with a language modeling head on top. 
+ """, + GPTJ_START_DOCSTRING, +) +class TFGPTJForCausalLM(TFGPTJPreTrainedModel, TFCausalLanguageModelingLoss): + def __init__(self, config, *inputs, **kwargs): + super().__init__(config, *inputs, **kwargs) + self.transformer = TFGPTJMainLayer(config, name="transformer") + self.lm_head = keras.layers.Dense( + config.vocab_size, kernel_initializer=get_initializer(config.initializer_range), name="lm_head" + ) + self.config = config + + def get_output_embeddings(self): + return self.lm_head + + def set_output_embeddings(self, new_embeddings): + self.lm_head = new_embeddings + + def prepare_inputs_for_generation(self, inputs, past_key_values=None, use_cache=None, **kwargs): + token_type_ids = kwargs.get("token_type_ids", None) + # only last token for inputs_ids if past is defined in kwargs + if past_key_values: + inputs = tf.expand_dims(inputs[:, -1], -1) + if token_type_ids is not None: + token_type_ids = tf.expand_dims(token_type_ids[:, -1], -1) + + position_ids = kwargs.get("position_ids", None) + attention_mask = kwargs.get("attention_mask", None) + + if attention_mask is not None and position_ids is None: + position_ids = tf.math.cumsum(attention_mask, axis=-1, exclusive=True) + if past_key_values: + position_ids = tf.expand_dims(position_ids[:, -1], -1) + + return { + "input_ids": inputs, + "attention_mask": attention_mask, + "position_ids": position_ids, + "past_key_values": past_key_values, + "use_cache": use_cache, + "token_type_ids": token_type_ids, + } + + @unpack_inputs + @add_start_docstrings_to_model_forward(GPTJ_INPUTS_DOCSTRING.format("batch_size, sequence_length")) + @add_code_sample_docstrings( + checkpoint=_CHECKPOINT_FOR_DOC, + output_type=TFCausalLMOutputWithPast, + config_class=_CONFIG_FOR_DOC, + ) + def call( + self, + input_ids: TFModelInputType | None = None, + past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None, + attention_mask: np.ndarray | tf.Tensor | None = None, + token_type_ids: np.ndarray | tf.Tensor | None = None, + position_ids: np.ndarray | tf.Tensor | None = None, + head_mask: np.ndarray | tf.Tensor | None = None, + inputs_embeds: np.ndarray | tf.Tensor | None = None, + labels: np.ndarray | tf.Tensor | None = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + training: Optional[bool] = False, + ) -> Union[TFCausalLMOutputWithPast, Tuple[tf.Tensor]]: + r""" + labels (`np.ndarray` or `tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. 
you can set + `labels = input_ids` Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100` + are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]` + """ + + transformer_outputs = self.transformer( + input_ids=input_ids, + past_key_values=past_key_values, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + training=training, + ) + hidden_states = transformer_outputs[0] + lm_logits = self.lm_head(hidden_states) + + loss = None + if labels is not None: + # shift labels to the left and cut last logit token + shifted_logits = lm_logits[:, :-1] + labels = labels[:, 1:] + loss = self.hf_compute_loss(labels, shifted_logits) + + if not return_dict: + output = (lm_logits,) + transformer_outputs[1:] + return ((loss,) + output) if loss is not None else output + + return TFCausalLMOutputWithPast( + loss=loss, + logits=lm_logits, + past_key_values=transformer_outputs.past_key_values, + hidden_states=transformer_outputs.hidden_states, + attentions=transformer_outputs.attentions, + ) + + def build(self, input_shape=None): + if self.built: + return + self.built = True + if getattr(self, "transformer", None) is not None: + with tf.name_scope(self.transformer.name): + self.transformer.build(None) + if getattr(self, "lm_head", None) is not None: + with tf.name_scope(self.lm_head.name): + self.lm_head.build([None, None, self.config.n_embd]) + + +@add_start_docstrings( + """ + The GPT-J Model transformer with a sequence classification head on top (linear layer). + + [`GPTJForSequenceClassification`] uses the last token in order to do the classification, as other causal models + (e.g. GPT, GPT-2, GPT-Neo) do. + + Since it does classification on the last token, it requires to know the position of the last token. If a + `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If + no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the + padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in + each row of the batch). 
+ """, + GPTJ_START_DOCSTRING, +) +class TFGPTJForSequenceClassification(TFGPTJPreTrainedModel, TFSequenceClassificationLoss): + _keys_to_ignore_on_load_missing = [r"h.\d+.attn.masked_bias", r"h.\d+.attn.bias", r"lm_head.weight"] + + def __init__(self, config, *inputs, **kwargs): + super().__init__(config, *inputs, **kwargs) + self.num_labels = config.num_labels + self.transformer = TFGPTJMainLayer(config, name="transformer") + self.score = keras.layers.Dense( + self.num_labels, + use_bias=False, + kernel_initializer=get_initializer(config.initializer_range), + name="score", + ) + self.config = config + + @unpack_inputs + @add_start_docstrings_to_model_forward(GPTJ_INPUTS_DOCSTRING.format("batch_size, sequence_length")) + @add_code_sample_docstrings( + checkpoint=_CHECKPOINT_FOR_DOC, + output_type=TFSequenceClassifierOutputWithPast, + config_class=_CONFIG_FOR_DOC, + ) + def call( + self, + input_ids: TFModelInputType | None = None, + past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None, + attention_mask: np.ndarray | tf.Tensor | None = None, + token_type_ids: np.ndarray | tf.Tensor | None = None, + position_ids: np.ndarray | tf.Tensor | None = None, + head_mask: np.ndarray | tf.Tensor | None = None, + inputs_embeds: np.ndarray | tf.Tensor | None = None, + labels: np.ndarray | tf.Tensor | None = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + training: Optional[bool] = False, + ) -> Union[TFSequenceClassifierOutputWithPast, Tuple[tf.Tensor]]: + r""" + labels (`np.ndarray` or `tf.Tensor` of shape `(batch_size,)`, *optional*): + Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., + config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If + `config.num_labels > 1` a classification loss is computed (Cross-Entropy). + """ + + transformer_outputs = self.transformer( + input_ids=input_ids, + past_key_values=past_key_values, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + training=training, + ) + hidden_states = transformer_outputs[0] + logits = self.score(hidden_states) + logits_shape = shape_list(logits) + in_logits = None + if self.config.pad_token_id is None: + sequence_lengths = -1 + else: + if input_ids is not None: + sequence_lengths = ( + tf.argmax(tf.cast(tf.math.equal(input_ids, self.config.pad_token_id), input_ids.dtype), axis=-1) + - 1 + ) + sequence_lengths = tf.where( + sequence_lengths >= 0, + sequence_lengths, + tf.cast(shape_list(input_ids[-1]), sequence_lengths.dtype) - 1, + ) + in_logits = tf.gather(logits, sequence_lengths, batch_dims=1, axis=1) + else: + sequence_lengths = -1 + logger.warning( + f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. 
Results may be " + "unexpected if using padding tokens in conjunction with `inputs_embeds.`" + ) + loss = None + + if labels is not None: + if self.config.pad_token_id is None and logits_shape[0] != 1: + raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.") + + if not tf.is_tensor(sequence_lengths): + in_logits = logits[0 : logits_shape[0], sequence_lengths] + + loss = self.hf_compute_loss(tf.reshape(labels, [-1]), tf.reshape(in_logits, [-1, self.num_labels])) + pooled_logits = in_logits if in_logits is not None else logits + + if not return_dict: + output = (pooled_logits,) + transformer_outputs[1:] + return ((loss,) + output) if loss is not None else output + + return TFSequenceClassifierOutputWithPast( + loss=loss, + logits=pooled_logits, + past_key_values=transformer_outputs.past_key_values, + hidden_states=transformer_outputs.hidden_states, + attentions=transformer_outputs.attentions, + ) + + def build(self, input_shape=None): + if self.built: + return + self.built = True + if getattr(self, "transformer", None) is not None: + with tf.name_scope(self.transformer.name): + self.transformer.build(None) + if getattr(self, "score", None) is not None: + with tf.name_scope(self.score.name): + self.score.build([None, None, self.config.n_embd]) + + +@add_start_docstrings( + """ + The GPT-J Model transformer with a span classification head on top for extractive question-answering tasks like + SQuAD (a linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`). + """, + GPTJ_START_DOCSTRING, +) +class TFGPTJForQuestionAnswering(TFGPTJPreTrainedModel, TFQuestionAnsweringLoss): + _keys_to_ignore_on_load_missing = [r"h.\d+.attn.masked_bias", r"h.\d+.attn.bias", r"lm_head.weight"] + + def __init__(self, config, *inputs, **kwargs): + super().__init__(config, *inputs, **kwargs) + self.num_labels = config.num_labels + self.transformer = TFGPTJMainLayer(config, name="transformer") + self.qa_outputs = keras.layers.Dense( + self.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="qa_outputs" + ) + self.config = config + + @unpack_inputs + @add_start_docstrings_to_model_forward(GPTJ_INPUTS_DOCSTRING.format("batch_size, sequence_length")) + @add_code_sample_docstrings( + checkpoint=_CHECKPOINT_FOR_DOC, + output_type=TFQuestionAnsweringModelOutput, + config_class=_CONFIG_FOR_DOC, + ) + def call( + self, + input_ids: TFModelInputType | None = None, + past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None, + attention_mask: np.ndarray | tf.Tensor | None = None, + token_type_ids: np.ndarray | tf.Tensor | None = None, + position_ids: np.ndarray | tf.Tensor | None = None, + head_mask: np.ndarray | tf.Tensor | None = None, + inputs_embeds: np.ndarray | tf.Tensor | None = None, + start_positions: np.ndarray | tf.Tensor | None = None, + end_positions: np.ndarray | tf.Tensor | None = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + training: Optional[bool] = False, + ) -> Union[TFQuestionAnsweringModelOutput, Tuple[tf.Tensor]]: + r""" + start_positions (`np.ndarray` or `tf.Tensor` of shape `(batch_size,)`, *optional*): + Labels for position (index) of the start of the labelled span for computing the token classification loss. + Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence + are not taken into account for computing the loss. 
+ end_positions (`np.ndarray` or `tf.Tensor` of shape `(batch_size,)`, *optional*): + Labels for position (index) of the end of the labelled span for computing the token classification loss. + Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence + are not taken into account for computing the loss. + """ + + transformer_outputs = self.transformer( + input_ids=input_ids, + past_key_values=past_key_values, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + training=training, + ) + sequence_output = transformer_outputs[0] + + logits = self.qa_outputs(sequence_output) + start_logits, end_logits = tf.split(logits, 2, axis=-1) + start_logits = tf.squeeze(start_logits, axis=-1) + end_logits = tf.squeeze(end_logits, axis=-1) + + loss = None + if start_positions is not None and end_positions is not None: + labels = {"start_position": start_positions} + labels["end_position"] = end_positions + loss = self.hf_compute_loss(labels, (start_logits, end_logits)) + + if not return_dict: + output = (start_logits, end_logits) + transformer_outputs[2:] + return ((loss,) + output) if loss is not None else output + + return TFQuestionAnsweringModelOutput( + loss=loss, + start_logits=start_logits, + end_logits=end_logits, + hidden_states=transformer_outputs.hidden_states, + attentions=transformer_outputs.attentions, + ) + + def build(self, input_shape=None): + if self.built: + return + self.built = True + if getattr(self, "transformer", None) is not None: + with tf.name_scope(self.transformer.name): + self.transformer.build(None) + if getattr(self, "qa_outputs", None) is not None: + with tf.name_scope(self.qa_outputs.name): + self.qa_outputs.build([None, None, self.config.hidden_size]) diff --git a/venv/lib/python3.10/site-packages/transformers/models/gptsan_japanese/__init__.py b/venv/lib/python3.10/site-packages/transformers/models/gptsan_japanese/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..b3635ace91163577201f716c9d67e255f11ea55b --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/gptsan_japanese/__init__.py @@ -0,0 +1,70 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
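The `__init__.py` body that follows registers the gptsan_japanese submodules with transformers' `_LazyModule`, so the heavy torch imports only happen on first attribute access. A rough stand-in for that behaviour using only the standard library and PEP 562 module `__getattr__` (package, module, and symbol names here are hypothetical, not the real transformers helper):

# lazy_pkg/__init__.py  (illustrative only; transformers uses its own _LazyModule helper)
import importlib
from typing import TYPE_CHECKING

_import_structure = {
    "config_mod": ["ToyConfig"],
    "model_mod": ["ToyModel"],
}

if TYPE_CHECKING:
    # Static type checkers still see the real symbols.
    from .config_mod import ToyConfig
    from .model_mod import ToyModel
else:
    def __getattr__(name):
        # Import the owning submodule lazily the first time a symbol is requested.
        for module_name, symbols in _import_structure.items():
            if name in symbols:
                module = importlib.import_module(f".{module_name}", __name__)
                return getattr(module, name)
        raise AttributeError(f"module {__name__!r} has no attribute {name!r}")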
+ +from typing import TYPE_CHECKING + +from ...utils import ( + OptionalDependencyNotAvailable, + _LazyModule, + is_flax_available, + is_tf_available, + is_torch_available, +) + + +_import_structure = { + "configuration_gptsan_japanese": ["GPTSAN_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTSanJapaneseConfig"], + "tokenization_gptsan_japanese": ["GPTSanJapaneseTokenizer"], +} + +try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["modeling_gptsan_japanese"] = [ + "GPTSAN_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST", + "GPTSanJapaneseForConditionalGeneration", + "GPTSanJapaneseModel", + "GPTSanJapanesePreTrainedModel", + ] + _import_structure["tokenization_gptsan_japanese"] = [ + "GPTSanJapaneseTokenizer", + ] + + +if TYPE_CHECKING: + from .configuration_gptsan_japanese import GPTSAN_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTSanJapaneseConfig + from .tokenization_gptsan_japanese import GPTSanJapaneseTokenizer + + try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .modeling_gptsan_japanese import ( + GPTSAN_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST, + GPTSanJapaneseForConditionalGeneration, + GPTSanJapaneseModel, + GPTSanJapanesePreTrainedModel, + ) + from .tokenization_gptsan_japanese import GPTSanJapaneseTokenizer + + +else: + import sys + + sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) diff --git a/venv/lib/python3.10/site-packages/transformers/models/gptsan_japanese/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/gptsan_japanese/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2585af62f8e6cabb8978cd00aa8a078b17892082 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/gptsan_japanese/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/gptsan_japanese/__pycache__/configuration_gptsan_japanese.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/gptsan_japanese/__pycache__/configuration_gptsan_japanese.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..36d6b0d0849cd72d5e0fbfbdbdfaba2fc32a70e4 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/gptsan_japanese/__pycache__/configuration_gptsan_japanese.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/gptsan_japanese/__pycache__/convert_gptsan_tf_checkpoint_to_pytorch.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/gptsan_japanese/__pycache__/convert_gptsan_tf_checkpoint_to_pytorch.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..edcf6c6c1f2fc811e89cbb6f641910fa5fea4b51 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/gptsan_japanese/__pycache__/convert_gptsan_tf_checkpoint_to_pytorch.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/gptsan_japanese/__pycache__/modeling_gptsan_japanese.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/gptsan_japanese/__pycache__/modeling_gptsan_japanese.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5f82f904ef24e78cc217e7b02a929dcbc38daf43 Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/transformers/models/gptsan_japanese/__pycache__/modeling_gptsan_japanese.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/gptsan_japanese/__pycache__/tokenization_gptsan_japanese.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/gptsan_japanese/__pycache__/tokenization_gptsan_japanese.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4c2b69730bb63868d87a46ad0ad4e4abcfa5fbfe Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/gptsan_japanese/__pycache__/tokenization_gptsan_japanese.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/gptsan_japanese/configuration_gptsan_japanese.py b/venv/lib/python3.10/site-packages/transformers/models/gptsan_japanese/configuration_gptsan_japanese.py new file mode 100644 index 0000000000000000000000000000000000000000..e0a17d1c114aef10038478955fac1d244e8c9a02 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/gptsan_japanese/configuration_gptsan_japanese.py @@ -0,0 +1,156 @@ +# coding=utf-8 +# Copyright 2023, HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" GPTSAN-japanese model configuration""" +from ...configuration_utils import PretrainedConfig +from ...utils import logging + + +logger = logging.get_logger(__name__) + + +from ..deprecated._archive_maps import GPTSAN_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 + + +class GPTSanJapaneseConfig(PretrainedConfig): + r""" + This is the configuration class to store the configuration of a [`GPTSanJapaneseModel`]. It is used to instantiate + a GPTSANJapanese model according to the specified arguments, defining the model architecture. Instantiating a + configuration with the defaults will yield a similar configuration to that of the GPTSANJapanese + [Tanrei/GPTSAN-japanese](https://huggingface.co/Tanrei/GPTSAN-japanese) architecture. + + Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the + documentation from [`PretrainedConfig`] for more information. + + Arguments: + vocab_size (`int`, *optional*, defaults to 36000): + Vocabulary size of the GPTSANJapanese model. Defines the number of different tokens that can be represented + by the `inputs_ids` passed when calling [`GPTSanJapaneseModel`]. + max_position_embeddings (`int`, *optional*, defaults to 1280): + The maximum sequence length that this model might ever be used with. Defaults set this to 1280. + d_model (`int`, *optional*, defaults to 1024): + Size of the encoder layers and the pooler layer. + d_ff (`int`, *optional*, defaults to 8192): + Size of the intermediate feed forward layer in each `SwitchTransformersBlock`. + d_ext (`int`, *optional*, defaults to 4096): + Size of the intermediate feed forward layer in each Extra-layers. + d_spout (`int`, *optional*, defaults to 128): + Size of the `spout` vector. 
+ num_switch_layers (`int`, *optional*, defaults to 10): + Number of layers in the Switch Transformer layer. + num_ext_layers (`int`, *optional*, defaults to 0): + Number of layers in the Extra-layers. + num_heads (`int`, *optional*, defaults to 16): + Number of attention heads for each attention layer in the Transformer encoder. + num_experts (`int`, *optional*, defaults to 16): + Number of experts for each SwitchTransformer layer. + expert_capacity (`int`, *optional*, defaults to 128): + Number of tokens that can be stored in each expert. If set to 1, the model will behave like a regular + Transformer. + dropout_rate (`float`, *optional*, defaults to 0.0): + The ratio for all dropout layers. + layer_norm_eps (`float`, *optional*, defaults to 1e-5): + The epsilon used by the layer normalization layers. + router_bias (`bool`, *optional*, defaults to `False`): + Whether to add a bias to the router. + router_jitter_noise (`float`, *optional*, defaults to 0.0): + Amount of noise to add to the router. Set it to 0.0 during prediction or set small value (usually 1e-2) + during training. + router_dtype (`str`, *optional*, default to `"float32"`): + The `dtype` used for the routers. It is preferable to keep the `dtype` to `"float32"` as specified in the + *selective precision* discussion in [the paper](https://arxiv.org/abs/2101.03961). + router_ignore_padding_tokens (`bool`, *optional*, defaults to `False`): + Whether to ignore padding tokens when routing. + output_hidden_states (`bool`, *optional*, default to `False`): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + output_attentions (`bool`, *optional*, defaults to `False`): + Whether or not to return the attentions tensors of all attention layers. + initializer_factor (`float`, *optional*, defaults to 0.002): + A factor for initializing all weight matrices. + output_router_logits (`bool`, *optional*, default to `False`): + Whether or not to return the router logits of all experts. 
+ use_cache (`bool`, *optional*, defaults to `True`): + Whether or not the model should return the last key/values attentions (not used by all models) + """ + + model_type = "gptsan-japanese" + keys_to_ignore_at_inference = [ + "past_key_values", + ] + attribute_map = { + "hidden_size": "d_model", + "num_attention_heads": "num_heads", + "num_hidden_layers": "num_layers", + } + + def __init__( + self, + vocab_size=36000, + max_position_embeddings=1280, + d_model=1024, + d_ff=8192, + d_ext=4096, + d_spout=128, + num_switch_layers=10, + num_ext_layers=0, + num_heads=16, + num_experts=16, + expert_capacity=128, + dropout_rate=0.0, + layer_norm_epsilon=1e-5, + router_bias=False, + router_jitter_noise=0.0, + router_dtype="float32", + router_ignore_padding_tokens=False, + output_hidden_states=False, + output_attentions=False, + initializer_factor=0.002, + output_router_logits=False, + use_cache=True, + separator_token_id=35998, + pad_token_id=35995, + eos_token_id=35999, + **kwargs, + ): + self.vocab_size = vocab_size + self.max_position_embeddings = max_position_embeddings + self.d_model = d_model + self.d_ff = d_ff + self.d_ext = d_ext + self.d_spout = d_spout + self.num_switch_layers = num_switch_layers + self.num_ext_layers = num_ext_layers + self.num_layers = num_switch_layers + num_ext_layers + self.num_heads = num_heads + self.num_experts = num_experts + self.expert_capacity = expert_capacity + self.dropout_rate = dropout_rate + self.layer_norm_epsilon = layer_norm_epsilon + self.router_bias = router_bias + self.router_jitter_noise = router_jitter_noise + self.router_dtype = router_dtype + self.router_ignore_padding_tokens = router_ignore_padding_tokens + self.output_hidden_states = output_hidden_states + self.output_attentions = output_attentions + self.initializer_factor = initializer_factor + self.output_router_logits = output_router_logits + self.use_cache = use_cache + + super().__init__( + separator_token_id=separator_token_id, + pad_token_id=pad_token_id, + eos_token_id=eos_token_id, + **kwargs, + ) diff --git a/venv/lib/python3.10/site-packages/transformers/models/gptsan_japanese/convert_gptsan_tf_checkpoint_to_pytorch.py b/venv/lib/python3.10/site-packages/transformers/models/gptsan_japanese/convert_gptsan_tf_checkpoint_to_pytorch.py new file mode 100644 index 0000000000000000000000000000000000000000..a84d000d44390fe6ae821fb1cdfba968d40a2b93 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/gptsan_japanese/convert_gptsan_tf_checkpoint_to_pytorch.py @@ -0,0 +1,181 @@ +# coding=utf-8 +# Copyright 2023 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
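The configuration class above only needs the fields you want to override; everything else keeps the documented defaults, and `num_layers` is derived as `num_switch_layers + num_ext_layers`. A short usage sketch, assuming a transformers release that still ships this model and with values chosen arbitrarily for illustration:

from transformers import GPTSanJapaneseConfig

# Small toy configuration; omitted arguments keep the defaults shown above.
config = GPTSanJapaneseConfig(
    d_model=512,
    d_ff=2048,
    num_switch_layers=4,
    num_ext_layers=1,
    num_heads=8,
    num_experts=4,
    expert_capacity=64,
)

print(config.num_layers)    # 5, i.e. num_switch_layers + num_ext_layers
print(config.hidden_size)   # 512, via the attribute_map alias for d_model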
+ +"""Convert GPTSANJapanese checkpoints from the original repository to pytorch model.""" + +import argparse +import json +import os +from collections import OrderedDict + +import numpy as np +import tensorflow as tf +import torch + + +def convert_tf_gptsan_to_pt(args): + parameter_file = os.path.join(args.tf_model_dir, "parameters.json") + params = json.loads(open(parameter_file).read()) + if not params: + raise ValueError( + f"It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file." + ) + if not args.output.endswith(".pt"): + args.output = args.output + ".pt" + new_state = OrderedDict() + with tf.device("/CPU:0"): + reader = tf.train.load_checkpoint(args.tf_model_dir) + shapes = reader.get_variable_to_shape_map() + for key_name in shapes.keys(): + vnp = reader.get_tensor(key_name).astype(np.float16) + if key_name.endswith("/adam_m") or key_name.endswith("/adam_v"): + continue + if key_name.startswith("pasts/"): + if key_name.startswith("pasts/mlp"): + player = int(key_name[9]) + elif key_name.startswith("pasts/out"): + player = 8 + name = "model.sqout.%d.weight" % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time + state = vnp.transpose([1, 0]).copy() # Mesh-Tensorflow is a diagonal matrix + new_state[name] = torch.tensor(state) + elif key_name.startswith("model/moe"): + player = int(key_name[9:].split("/")[0]) + if key_name.endswith("/switch_gating/kernel"): + name = "model.blocks.%d.feed_forward.mlp.router.classifier.weight" % player + state = vnp.transpose([1, 0]).copy() # Mesh-Tensorflow is a diagonal matrix + new_state[name] = torch.tensor(state) + elif key_name.endswith("/softmlp/kernel"): + name = "model.blocks.%d.feed_forward.soft_bypass_mlp.weight" % player + state = vnp.transpose([1, 0]).copy() # Mesh-Tensorflow is a diagonal matrix + new_state[name] = torch.tensor(state) + elif key_name.endswith("/wo/kernel") or key_name.endswith("/wi/kernel"): + nlayer = key_name[-9:-7] + for i in range(16): + name = "model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight" % (player, i, nlayer) + state = ( + vnp[i].transpose([1, 0]).copy() + ) # In Mesh-Tensorflow, it is one array, so it is divided + new_state[name] = torch.tensor(state) + elif key_name.startswith("model/mlp"): + player = int(key_name[9:].split("/")[0]) + if key_name.endswith("/p1/kernel"): + name = "model.blocks.%d.feed_forward.mlp.wi.weight" % player + state = vnp.transpose([1, 0]).copy() # Mesh-Tensorflow is a diagonal matrix + new_state[name] = torch.tensor(state) + elif key_name.endswith("/p1/bias"): + name = "model.blocks.%d.feed_forward.mlp.wi.bias" % player + state = vnp.copy() # same because it is one dimensional + new_state[name] = torch.tensor(state) + elif key_name.endswith("/p2/kernel"): + name = "model.blocks.%d.feed_forward.mlp.wo.weight" % player + state = vnp.transpose([1, 0]).copy() # Mesh-Tensorflow is a diagonal matrix + new_state[name] = torch.tensor(state) + elif key_name.endswith("/p2/bias"): + name = "model.blocks.%d.feed_forward.mlp.wo.bias" % player + state = vnp.copy() # same because it is one dimensional + new_state[name] = torch.tensor(state) + elif key_name.startswith("model/ln"): + player = int(key_name[8:].split("/")[0]) + if key_name.endswith("/b"): + name = "model.blocks.%d.feed_forward.norm.bias" % player + state = vnp.copy() # same because it is one dimensional + new_state[name] = torch.tensor(state) + elif key_name.endswith("/g"): + name = "model.blocks.%d.feed_forward.norm.weight" % player + state = vnp.copy() # same because 
it is one dimensional + new_state[name] = torch.tensor(state) + elif key_name.startswith("model/att"): + player = int(key_name[9:].split("/")[0]) + if key_name.endswith("/qkv/kernel"): + state = vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum + state_q = state[:, 0, :, :] + state_k = state[:, 1, :, :] + state_v = state[:, 2, :, :] + state_q = ( + state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]]) + .transpose([1, 0]) + .copy() + ) # Mesh-Tensorflow is a diagonal matrix + state_k = ( + state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]]) + .transpose([1, 0]) + .copy() + ) # Mesh-Tensorflow is a diagonal matrix + state_v = ( + state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]]) + .transpose([1, 0]) + .copy() + ) # Mesh-Tensorflow is a diagonal matrix + name = "model.blocks.%d.self_attn.self_attn.q_proj.weight" % player + new_state[name] = torch.tensor(state_q) + name = "model.blocks.%d.self_attn.self_attn.k_proj.weight" % player + new_state[name] = torch.tensor(state_k) + name = "model.blocks.%d.self_attn.self_attn.v_proj.weight" % player + new_state[name] = torch.tensor(state_v) + elif key_name.endswith("/o/kernel"): + name = "model.blocks.%d.self_attn.self_attn.out_proj.weight" % player + state = ( + vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]]).transpose([1, 0]).copy() + ) # Mesh-Tensorflow is a diagonal matrix + new_state[name] = torch.tensor(state) + elif key_name.startswith("model/an"): + player = int(key_name[8:].split("/")[0]) + if key_name.endswith("/b"): + name = "model.blocks.%d.self_attn.norm.bias" % player + state = vnp.copy() # same because it is one dimensional + new_state[name] = torch.tensor(state) + elif key_name.endswith("/g"): + name = "model.blocks.%d.self_attn.norm.weight" % player + state = vnp.copy() # same because it is one dimensional + new_state[name] = torch.tensor(state) + elif ( + key_name.startswith("model/wte") + or key_name.startswith("model/wpe") + or key_name.startswith("model/ete") + ): + nlayer = {"wte": "embed_tokens", "wpe": "position_embeddings", "ete": "extra_position_embeddings"}[ + key_name[-3:] + ] + name = "model.%s.weight" % nlayer + state = vnp.copy() # same in embedded + new_state[name] = torch.tensor(state) + if key_name.startswith("model/wte"): + name = "lm_head.weight" + state = vnp.copy() # same in embedded + new_state[name] = torch.tensor(state) + elif key_name.startswith("model/wob"): + name = "final_logits_bias" + state = vnp.copy() # same in embedded + state = state.reshape((1, -1)) + new_state[name] = torch.tensor(state) + elif key_name == "model/dense/kernel": + name = "model.last_project.weight" + state = vnp.transpose([1, 0]).copy() # Mesh-Tensorflow is a diagonal matrix + new_state[name] = torch.tensor(state) + elif key_name == "model/dense_1/bias": + name = "model.last_project.bias" + state = vnp.copy() # same because it is one dimensional + new_state[name] = torch.tensor(state) + torch.save(new_state, args.output) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser( + description="model converter.", formatter_class=argparse.ArgumentDefaultsHelpFormatter + ) + parser.add_argument("--tf_model_dir", metavar="PATH", type=str, required=True, help="import model") + parser.add_argument("--output", metavar="PATH", type=str, required=True, help="output model") + args = parser.parse_args() + convert_tf_gptsan_to_pt(args) diff --git a/venv/lib/python3.10/site-packages/transformers/models/gptsan_japanese/modeling_gptsan_japanese.py 
b/venv/lib/python3.10/site-packages/transformers/models/gptsan_japanese/modeling_gptsan_japanese.py new file mode 100644 index 0000000000000000000000000000000000000000..59252bc567a4624a058d65c1e07270c185b8a71b --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/gptsan_japanese/modeling_gptsan_japanese.py @@ -0,0 +1,1343 @@ +# coding=utf-8 +# Copyright 2023 Toshiyuki Sakamoto(tanreinama) and HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" PyTorch GPTSANJapanese model.""" + + +import copy +from typing import List, Optional, Tuple, Union + +import torch +import torch.nn as nn + +from ...activations import ACT2FN +from ...modeling_outputs import MoECausalLMOutputWithPast, MoEModelOutputWithPastAndCrossAttentions +from ...modeling_utils import PreTrainedModel +from ...utils import ( + DUMMY_INPUTS, + DUMMY_MASK, + add_start_docstrings, + add_start_docstrings_to_model_forward, + is_torch_fx_proxy, + logging, +) +from .configuration_gptsan_japanese import GPTSanJapaneseConfig + + +logger = logging.get_logger(__name__) + +_CONFIG_FOR_DOC = "GPTSanJapaneseConfig" +_CHECKPOINT_FOR_DOC = "Tanrei/GPTSAN-japanese" + +#################################################### +# This dict contains ids and associated url +# for the pretrained weights provided with the models +#################################################### + +from ..deprecated._archive_maps import GPTSAN_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 + + +# Copied from transformers.models.switch_transformers.modeling_switch_transformers.router_z_loss_func +def router_z_loss_func(router_logits: torch.Tensor) -> float: + r""" + Compute the router z-loss implemented in PyTorch. + + The router z-loss was introduced in [Designing Effective Sparse Expert Models](https://arxiv.org/abs/2202.08906). + It encourages router logits to remain small in an effort to improve stability. + + Args: + router_logits (`float`): + Input logits of shape [batch_size, sequence_length, num_experts] + + Returns: + Scalar router z-loss. + """ + num_groups, tokens_per_group, _ = router_logits.shape + log_z = torch.logsumexp(router_logits, dim=-1) + z_loss = log_z**2 + return torch.sum(z_loss) / (num_groups * tokens_per_group) + + +# Copied from transformers.models.switch_transformers.modeling_switch_transformers.load_balancing_loss_func +def load_balancing_loss_func(router_probs: torch.Tensor, expert_indices: torch.Tensor) -> float: + r""" + Computes auxiliary load balancing loss as in Switch Transformer - implemented in Pytorch. + + See Switch Transformer (https://arxiv.org/abs/2101.03961) for more details. This function implements the loss + function presented in equations (4) - (6) of the paper. It aims at penalizing cases where the routing between + experts is too unbalanced. + + Args: + router_probs (`torch.Tensor`): + Probability assigned to each expert per token. Shape: [batch_size, seqeunce_length, num_experts]. 
+ expert_indices (`torch.Tensor`): + Indices tensor of shape [batch_size, seqeunce_length] identifying the selected expert for a given token. + + Returns: + The auxiliary loss. + """ + num_experts = router_probs.shape[-1] + + # cast the expert indices to int64, otherwise one-hot encoding will fail + if expert_indices.dtype != torch.int64: + expert_indices = expert_indices.to(torch.int64) + + if len(expert_indices.shape) == 2: + expert_indices = expert_indices.unsqueeze(2) + + expert_mask = torch.nn.functional.one_hot(expert_indices, num_experts) + + # For a given token, determine if it was routed to a given expert. + expert_mask = torch.max(expert_mask, axis=-2).values + + # cast to float32 otherwise mean will fail + expert_mask = expert_mask.to(torch.float32) + tokens_per_group_and_expert = torch.mean(expert_mask, axis=-2) + + router_prob_per_group_and_expert = torch.mean(router_probs, axis=-2) + return torch.mean(tokens_per_group_and_expert * router_prob_per_group_and_expert) * (num_experts**2) + + +class GPTSanJapaneseDenseActDense(nn.Module): + """ + FFN Layer for Switch Transformer and Extra layers + + GPTSAN can mix Switch Transformer layers and normal Transformer layers This class is used as Expert in Switch + Transformer layers and as FFN in regular Transformer layers. RELU is used in the Switch Transformer layer, and + Swish is used in the normal Transformer layer, so there is a choice of which is used in the argument. + + """ + + def __init__(self, config: GPTSanJapaneseConfig, ext_layer=False): + super().__init__() + d_inter = config.d_ext if ext_layer else config.d_ff + self.wi = nn.Linear(config.d_model, d_inter, bias=ext_layer) + self.wo = nn.Linear(d_inter, config.d_model, bias=ext_layer) + self.dropout = nn.Identity() if ext_layer else nn.Dropout(config.dropout_rate) + self.act = ACT2FN["swish" if ext_layer else "relu"] + + def forward(self, hidden_states): + r""" + Args: + hidden_states (`torch.Tensor`) : + [num_groups, tokens_per_group, hidden_dim] inputs to send to experts. + Returns: + torch.Tensor[num_groups, tokens_per_group, hidden_dim] + + """ + hidden_states = self.wi(hidden_states) + hidden_states = self.act(hidden_states) + hidden_states = self.dropout(hidden_states) + hidden_states = self.wo(hidden_states) + return hidden_states + + +# Copied from transformers.models.switch_transformers.modeling_switch_transformers.SwitchTransformersTop1Router with SwitchTransformers->GPTSanJapanese +class GPTSanJapaneseTop1Router(nn.Module): + """ + Router using tokens choose top-1 experts assignment. + + This router uses the same mechanism as in Switch Transformer (https://arxiv.org/abs/2101.03961) and V-MoE + (https://arxiv.org/abs/2106.05974): tokens choose their top experts. Items are sorted by router_probs and then + routed to their choice of expert until the expert's expert_capacity is reached. **There is no guarantee that each + token is processed by an expert**, or that each expert receives at least one token. 
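Both auxiliary losses above work on per-token router statistics of shape `[batch_size, sequence_length, num_experts]`. A quick sketch of calling them with random inputs of the documented shapes, assuming the two functions defined earlier in this module are in scope:

import torch

batch_size, seq_len, num_experts = 2, 6, 4

router_logits = torch.randn(batch_size, seq_len, num_experts)
router_probs = torch.softmax(router_logits, dim=-1)
expert_indices = torch.argmax(router_probs, dim=-1)          # top-1 expert per token

z_loss = router_z_loss_func(router_logits)                   # penalizes large router logits
balance_loss = load_balancing_loss_func(router_probs, expert_indices)

# A roughly uniform router yields a balance loss near 1.0; skewed routing pushes it higher.
print(z_loss.item(), balance_loss.item())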
+ + """ + + def __init__(self, config: GPTSanJapaneseConfig): + super().__init__() + self.num_experts = config.num_experts + self.expert_capacity = config.expert_capacity + self.classifier = nn.Linear(config.hidden_size, self.num_experts, bias=config.router_bias) + self.jitter_noise = config.router_jitter_noise + self.ignore_padding_tokens = config.router_ignore_padding_tokens + self.dtype = getattr(torch, config.router_dtype) + + def _compute_router_probabilities(self, hidden_states: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: + r""" + Computes router probabilities from input hidden states. + + Args: + hidden_states (`torch.Tensor`): + (batch_size, sequence_length, hidden_dim) from which router probabilities are computed. + Returns: + router_probabilities (`torch.Tensor`): + Tensor of shape (batch_size, sequence_length, num_experts) corresponding to the probabilities for each + token and expert. Used for routing tokens to experts. + router_logits (`torch.Tensor`): + Logits tensor of shape (batch_size, sequence_length, num_experts) corresponding to raw router logits. + This is used later for computing router z-loss. + """ + # float32 is used to ensure stability. See the discussion of "selective precision" in + # https://arxiv.org/abs/2101.03961. + # We also store the previous dtype to cast back the output to the previous dtype + self.input_dtype = hidden_states.dtype + hidden_states = hidden_states.to(self.dtype) + + if self.training and self.jitter_noise > 0: + # Multiply the token inputs by the uniform distribution - adding some noise + hidden_states *= torch.empty_like(hidden_states).uniform_(1.0 - self.jitter_noise, 1.0 + self.jitter_noise) + + # Shape: [num_groups, tokens_per_group, num_experts] + self._cast_classifier() + router_logits = self.classifier(hidden_states) + + # Apply Softmax and cast back to the original `dtype` + router_probabilities = nn.functional.softmax(router_logits, dim=-1, dtype=self.dtype).to(self.input_dtype) + return router_probabilities, router_logits + + def _cast_classifier(self): + r""" + `bitsandbytes` `Linear8bitLt` layers does not support manual casting Therefore we need to check if they are an + instance of the `Linear8bitLt` class by checking special attributes. + """ + if not (hasattr(self.classifier, "SCB") or hasattr(self.classifier, "CB")): + self.classifier = self.classifier.to(self.dtype) + + def forward(self, hidden_states: torch.Tensor) -> Tuple: + r""" + Generic forward function for every Router class. Each Router expects to have the same input hidden states + (`hidden_states`) corresponding to the hidden states for each token, the `expert_capacity` corresponding to the + number of tokens the Router will send to each expert, some Routers can send up to few tokens to each expert. + + Each Router works as the following: it expects the hidden states for each token, gets the `router_probs` and + `router_logits` from the `router_weights`. This will assign for each token, the raw probability to be assigned + to an expert. Then each Router class will have to define its own `_compute_routing_instructions`. + + Args: + hidden_states (`torch.Tensor`) : + [num_groups, tokens_per_group, hidden_dim] inputs to send to experts. + Returns: + Tuple[`torch.Tensor`, `torch.Tensor`, `torch.Tensor`] Tuple containing the expert index, the router probs + and the router logits. The router probabilities and logits are required to compute the loss. 
+ """ + router_probs, router_logits = self._compute_router_probabilities(hidden_states) + + expert_index = torch.argmax(router_probs, dim=-1) + expert_index = torch.nn.functional.one_hot(expert_index, num_classes=self.num_experts) + + # Mask tokens outside expert capacity. Sum over each sequence + token_priority = torch.cumsum(expert_index, dim=-2) + # mask if the token routed to to the expert will overflow + expert_capacity_mask = token_priority <= self.expert_capacity + expert_index = expert_index * expert_capacity_mask + + router_probs = torch.max(router_probs, dim=-1).values.unsqueeze(-1) + return expert_index, router_probs, router_logits + + +# Copied from transformers.models.switch_transformers.modeling_switch_transformers.SwitchTransformersSparseMLP with SwitchTransformers->GPTSanJapanese +class GPTSanJapaneseSparseMLP(nn.Module): + r""" + Implementation of the Switch Transformers Sparse MLP module. + """ + + def __init__(self, config: GPTSanJapaneseConfig, expert_class: nn.Module = GPTSanJapaneseDenseActDense): + super().__init__() + # Step 1: Get the correct router according to its class + self.router = GPTSanJapaneseTop1Router(config) + + # Step 2: Get the experts + self.experts = nn.ModuleDict() + for idx in range(config.num_experts): + self.experts[f"expert_{idx}"] = expert_class(config) + + def forward(self, hidden_states): + r""" + Hold on, this will be slightly tricky to understand In the correct order, a MoE layer does the following: + + 1- Gets the `router_mask` from the router. The shape of the mask is `(batch_size, sequence_length, num_expert)` + and corresponds to the argmax of the `router_probs`. The probabilities are needed in the computation of the + hidden states : they are broadcasted to the hidden states values (can be interpreted as a scaling factor). + + 2- Dispatch the tokens to its associated experts. We do a classic for loop over the experts and assign for each + expert the corresponding hidden states. + + """ + # Step 1: Get the router_mask from the router as wel as the probabilities + router_mask, router_probs, router_logits = self.router(hidden_states) + expert_index = torch.argmax(router_mask, dim=-1) + + # The routers introduced might not always map all the tokens, to a router, which means that some hidden states + # can be unchanged from one layer to another. That is why the hidden states are cloned before updating only the seleced ones. + + next_states = hidden_states.clone() + for idx, expert in enumerate(self.experts.values()): + token_indices = router_mask[:, :, idx].bool() + next_states[token_indices] = expert(hidden_states[token_indices]).to(next_states.dtype) + + hidden_states = router_probs * next_states + return hidden_states, (router_logits, expert_index) + + +class GPTSanJapaneseLayerSparseFF(nn.Module): + r""" + Switch Transformers Feed Forward layer module. This is a wrapper around the Mixture of Experts module. + + Parameters: + config : ([`GPTSanJapaneseConfig`]): Model configuration class with all the parameters of the model. + Initializing with a config file does not load the weights associated with the model, only the + configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. 
+ """ + + def __init__(self, config: GPTSanJapaneseConfig): + super().__init__() + self.mlp = GPTSanJapaneseSparseMLP(config) + self.soft_bypass_mlp = nn.Linear(config.d_model, config.d_model, bias=False) + self.norm = nn.LayerNorm(config.d_model, eps=config.layer_norm_epsilon) + + def forward(self, hidden_states, output_router_logits): + r""" + Args: + hidden_states (`torch.Tensor`) : + [num_groups, tokens_per_group, hidden_dim] inputs to send to experts. + output_router_logits (`bool`) : + output experts router output. + Returns: + torch.Tensor[num_groups, tokens_per_group, hidden_dim] + + """ + forwarded_states, router_tuple = self.mlp(hidden_states) + forwarded_states += torch.tanh(self.soft_bypass_mlp(hidden_states)) + output = hidden_states + self.norm(forwarded_states) + + if output_router_logits and router_tuple is not None: + return output, router_tuple + else: + return output + + +class GPTSanJapaneseLayerDenseFF(nn.Module): + r""" + Extra Transformers Feed Forward layer module. + + Parameters: + config : ([`GPTSanJapaneseConfig`]): Model configuration class with all the parameters of the model. + Initializing with a config file does not load the weights associated with the model, only the + configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. + """ + + def __init__(self, config: GPTSanJapaneseConfig): + super().__init__() + # Check if it is a sparse layer, if not then it is a dense layer + self.mlp = GPTSanJapaneseDenseActDense(config, ext_layer=True) + self.norm = nn.LayerNorm(config.d_model, eps=config.layer_norm_epsilon) + + def forward(self, hidden_states): + r""" + Args: + hidden_states (`torch.Tensor`) : + [num_groups, tokens_per_group, hidden_dim] inputs to send to experts. + Returns: + torch.Tensor[num_groups, tokens_per_group, hidden_dim] + + """ + forwarded_states = self.mlp(hidden_states) + output = hidden_states + self.norm(forwarded_states) + return output + + +# Copied from transformers.models.bart.modeling_bart.BartAttention with Bart->GPTSanJapanese +class GPTSanJapaneseAttention(nn.Module): + """Multi-headed attention from 'Attention Is All You Need' paper""" + + def __init__( + self, + embed_dim: int, + num_heads: int, + dropout: float = 0.0, + is_decoder: bool = False, + bias: bool = True, + is_causal: bool = False, + config: Optional[GPTSanJapaneseConfig] = None, + ): + super().__init__() + self.embed_dim = embed_dim + self.num_heads = num_heads + self.dropout = dropout + self.head_dim = embed_dim // num_heads + self.config = config + + if (self.head_dim * num_heads) != self.embed_dim: + raise ValueError( + f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}" + f" and `num_heads`: {num_heads})." 
+ ) + self.scaling = self.head_dim**-0.5 + self.is_decoder = is_decoder + self.is_causal = is_causal + + self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias) + self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) + self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias) + self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) + + def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): + return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() + + def forward( + self, + hidden_states: torch.Tensor, + key_value_states: Optional[torch.Tensor] = None, + past_key_value: Optional[Tuple[torch.Tensor]] = None, + attention_mask: Optional[torch.Tensor] = None, + layer_head_mask: Optional[torch.Tensor] = None, + output_attentions: bool = False, + ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: + """Input shape: Batch x Time x Channel""" + + # if key_value_states are provided this layer is used as a cross-attention layer + # for the decoder + is_cross_attention = key_value_states is not None + + bsz, tgt_len, _ = hidden_states.size() + + # get query proj + query_states = self.q_proj(hidden_states) * self.scaling + # get key, value proj + # `past_key_value[0].shape[2] == key_value_states.shape[1]` + # is checking that the `sequence_length` of the `past_key_value` is the same as + # the provided `key_value_states` to support prefix tuning + if ( + is_cross_attention + and past_key_value is not None + and past_key_value[0].shape[2] == key_value_states.shape[1] + ): + # reuse k,v, cross_attentions + key_states = past_key_value[0] + value_states = past_key_value[1] + elif is_cross_attention: + # cross_attentions + key_states = self._shape(self.k_proj(key_value_states), -1, bsz) + value_states = self._shape(self.v_proj(key_value_states), -1, bsz) + elif past_key_value is not None: + # reuse k, v, self_attention + key_states = self._shape(self.k_proj(hidden_states), -1, bsz) + value_states = self._shape(self.v_proj(hidden_states), -1, bsz) + key_states = torch.cat([past_key_value[0], key_states], dim=2) + value_states = torch.cat([past_key_value[1], value_states], dim=2) + else: + # self_attention + key_states = self._shape(self.k_proj(hidden_states), -1, bsz) + value_states = self._shape(self.v_proj(hidden_states), -1, bsz) + + if self.is_decoder: + # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. + # Further calls to cross_attention layer can then reuse all cross-attention + # key/value_states (first "if" case) + # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of + # all previous decoder key/value_states. 
Further calls to uni-directional self-attention + # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) + # if encoder bi-directional self-attention `past_key_value` is always `None` + past_key_value = (key_states, value_states) + + proj_shape = (bsz * self.num_heads, -1, self.head_dim) + query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape) + key_states = key_states.reshape(*proj_shape) + value_states = value_states.reshape(*proj_shape) + + src_len = key_states.size(1) + attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) + + if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len): + raise ValueError( + f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is" + f" {attn_weights.size()}" + ) + + if attention_mask is not None: + if attention_mask.size() != (bsz, 1, tgt_len, src_len): + raise ValueError( + f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}" + ) + attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask + attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) + + attn_weights = nn.functional.softmax(attn_weights, dim=-1) + + if layer_head_mask is not None: + if layer_head_mask.size() != (self.num_heads,): + raise ValueError( + f"Head mask for a single layer should be of size {(self.num_heads,)}, but is" + f" {layer_head_mask.size()}" + ) + attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) + + if output_attentions: + # this operation is a bit awkward, but it's required to + # make sure that attn_weights keeps its gradient. + # In order to do so, attn_weights have to be reshaped + # twice and have to be reused in the following + attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len) + else: + attn_weights_reshaped = None + + attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) + + attn_output = torch.bmm(attn_probs, value_states) + + if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim): + raise ValueError( + f"`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is" + f" {attn_output.size()}" + ) + + attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim) + attn_output = attn_output.transpose(1, 2) + + # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be + # partitioned across GPUs when using tensor-parallelism. 
+ attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim) + + attn_output = self.out_proj(attn_output) + + return attn_output, attn_weights_reshaped, past_key_value + + +class GPTSanJapaneseLayerSelfAttention(nn.Module): + """ + Self Attention and Normalization Unit + """ + + def __init__(self, config, has_relative_attention_bias=False): + super().__init__() + self.self_attn = GPTSanJapaneseAttention( + embed_dim=config.d_model, + num_heads=config.num_heads, + is_decoder=True, + bias=has_relative_attention_bias, + ) + self.norm = nn.LayerNorm(config.d_model, eps=config.layer_norm_epsilon) + + def forward( + self, + hidden_states: Optional[Tuple[torch.FloatTensor]], + past_key_value: Optional[Tuple[torch.Tensor]] = None, + attention_mask: Optional[torch.FloatTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + use_cache: Optional[bool] = False, + output_attentions: Optional[bool] = False, + ) -> Tuple[Union[torch.Tensor, Tuple[torch.Tensor]], ...]: + r""" + Self-attention and normalize block. + + Args: + hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention + if the model is configured as a decoder. + past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): + Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up + decoding. If `past_key_values` are used, the user can optionally input only the last + `decoder_input_ids` (those that don't have their past key value states given to this model) of shape + `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. + attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used + in the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + head_mask (`numpy.ndarray` of shape `({0})`, `optional): + Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + + use_cache (`bool`, *optional*): + If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding + (see `past_key_values`). + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more detail. + Returns: + Tuple[torch.Tensor[num_groups, tokens_per_group, hidden_dim],...] 
+ """ + # Self Attention + # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 + self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None + # add present self-attn cache to positions 1,2 of present_key_value tuple + atten_out = self.self_attn( + hidden_states=hidden_states, + past_key_value=self_attn_past_key_value, + attention_mask=(1 - attention_mask) * torch.finfo(hidden_states.dtype).min, + layer_head_mask=head_mask, + output_attentions=output_attentions, + ) + if output_attentions: + attn_weights = (atten_out[1],) + else: + attn_weights = () + + attention_output = atten_out[0] + + hidden = hidden_states + self.norm(attention_output) + + if use_cache: + outputs = (hidden, atten_out[2]) # hidden, present, (attentions) + else: + outputs = (hidden,) # hidden, (attentions) + + return outputs + attn_weights + + +class GPTSanJapaneseBlock(nn.Module): + """ + Self Attention and FFN Unit + """ + + def __init__(self, config, ext_layer=False): + super().__init__() + self.self_attn = GPTSanJapaneseLayerSelfAttention(config) + self.feed_forward = GPTSanJapaneseLayerDenseFF(config) if ext_layer else GPTSanJapaneseLayerSparseFF(config) + + def forward( + self, + hidden_states: Optional[Tuple[torch.FloatTensor]], + past_key_value: Optional[Tuple[torch.Tensor]] = None, + attention_mask: Optional[torch.FloatTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + use_cache: Optional[bool] = False, + output_attentions: Optional[bool] = False, + output_router_tuple: Optional[bool] = False, + ) -> Tuple[Union[torch.Tensor, Tuple[torch.Tensor]], ...]: + r""" + GPTSAN transformer block. + + Args: + hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention + if the model is configured as a decoder. + past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): + Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up + decoding. If `past_key_values` are used, the user can optionally input only the last + `decoder_input_ids` (those that don't have their past key value states given to this model) of shape + `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. + attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used + in the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + head_mask (`numpy.ndarray` of shape `({0})`, `optional): + Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + + use_cache (`bool`, *optional*): + If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding + (see `past_key_values`). + output_attentions (`bool`) : + output attention probabirities. + output_router_tuple: + output experts router logits and expert id. + Returns: + Tuple[torch.Tensor[num_groups, tokens_per_group, hidden_dim],...] 
+ """ + atten_out = self.self_attn( + hidden_states=hidden_states, + past_key_value=past_key_value, + attention_mask=attention_mask, + head_mask=head_mask, + use_cache=use_cache, + output_attentions=output_attentions, + ) + attention_output = atten_out[0] + + if isinstance(self.feed_forward, GPTSanJapaneseLayerSparseFF): + sparse_out = self.feed_forward(attention_output, output_router_tuple) + if output_router_tuple: + hidden, router_tuple = sparse_out + else: + hidden = sparse_out + else: + hidden = self.feed_forward(attention_output) + + outputs = (hidden,) + atten_out[1:] + + if isinstance(self.feed_forward, GPTSanJapaneseLayerSparseFF) and output_router_tuple: + outputs += (router_tuple,) + + return outputs + + +class GPTSanJapanesePreTrainedModel(PreTrainedModel): + """ + An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained + models. + """ + + config_class = GPTSanJapaneseConfig + base_model_prefix = "gptsan_japanese" + supports_gradient_checkpointing = False + _no_split_modules = ["GPTSanJapaneseBlock"] + _skip_keys_device_placement = "past_key_values" + + @property + def dummy_inputs(self): + input_ids = torch.tensor(DUMMY_INPUTS) + input_mask = torch.tensor(DUMMY_MASK) + dummy_inputs = { + "input_ids": input_ids, + "attention_mask": input_mask, + } + return dummy_inputs + + def _init_weights(self, module): + """Initialize the weights""" + factor = self.config.initializer_factor # Used for testing weights initialization + if isinstance(module, nn.LayerNorm): + module.weight.data.fill_(factor * 1.0) + module.bias.data.zero_() + elif isinstance(module, nn.Linear): + module.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5)) + if hasattr(module, "bias") and module.bias is not None: + module.bias.data.zero_() + elif isinstance(module, nn.Embedding): + module.weight.data.normal_(mean=0.0, std=factor * 1.0) + elif isinstance(module, GPTSanJapaneseModel): + # Mesh TensorFlow embeddings initialization + # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L1624 + module.embed_tokens.weight.data.normal_(mean=0.0, std=factor * 1.0) + module.position_embeddings.weight.data.normal_(mean=0.0, std=factor * 1.0) + if hasattr(module, "extra_position_embeddings") and module.extra_position_embeddings is not None: + module.extra_position_embeddings.weight.data.normal_(mean=0.0, std=factor * 1.0) + elif isinstance(module, (GPTSanJapaneseModel, GPTSanJapaneseForConditionalGeneration)): + # Mesh TensorFlow embeddings initialization + # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L1624 + module.final_logits_bias.data.normal_(mean=0.0, std=factor * 1.0) + if hasattr(module, "lm_head") and not self.config.tie_word_embeddings: + module.lm_head.weight.data.normal_(mean=0.0, std=factor * 1.0) + elif isinstance(module, GPTSanJapaneseDenseActDense): + # Mesh TensorFlow FF initialization + # See https://github.com/tensorflow/mesh/blob/master/mesh_tensorflow/transformer/transformer_layers.py#L56 + # and https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L89 + module.wi.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5)) + if hasattr(module.wi, "bias") and module.wi.bias is not None: + module.wi.bias.data.zero_() + module.wo.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_ff) ** -0.5)) + if hasattr(module.wo, "bias") 
and module.wo.bias is not None: + module.wo.bias.data.zero_() + elif isinstance(module, GPTSanJapaneseAttention): + # Multi-headed attention + d_model = self.config.d_model + key_value_proj_dim = self.config.d_model + n_heads = self.config.num_heads + module.k_proj.weight.data.normal_(mean=0.0, std=factor * ((d_model * key_value_proj_dim) ** -0.5)) + module.v_proj.weight.data.normal_(mean=0.0, std=factor * ((d_model * key_value_proj_dim) ** -0.5)) + module.q_proj.weight.data.normal_(mean=0.0, std=factor * ((d_model * key_value_proj_dim) ** -0.5)) + module.out_proj.weight.data.normal_(mean=0.0, std=factor * ((n_heads * key_value_proj_dim) ** -0.5)) + elif isinstance(module, GPTSanJapaneseSparseMLP): + # Mesh TensorFlow attention initialization to avoid scaling before softmax + # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/attention.py#L136 + d_model = self.config.d_model + key_value_proj_dim = self.config.d_model + n_heads = self.config.num_heads + module.router.classifier.weight.data.normal_(mean=0.0, std=factor * 1) + for idx in range(self.config.num_experts): + module.experts[f"expert_{idx}"].wi.weight.data.normal_(mean=0.0, std=factor * (d_model**-0.5)) + module.experts[f"expert_{idx}"].wo.weight.data.normal_(mean=0.0, std=factor * (d_model**-0.5)) + + # Copied from transformers.models.t5.modeling_t5.T5PreTrainedModel._shift_right + def _shift_right(self, input_ids): + decoder_start_token_id = self.config.decoder_start_token_id + pad_token_id = self.config.pad_token_id + + if decoder_start_token_id is None: + raise ValueError( + "self.model.config.decoder_start_token_id has to be defined. In T5 it is usually set to the pad_token_id. " + "See T5 docs for more information." + ) + + # shift inputs to the right + if is_torch_fx_proxy(input_ids): + # Item assignment is not supported natively for proxies. + shifted_input_ids = torch.full(input_ids.shape[:-1] + (1,), decoder_start_token_id) + shifted_input_ids = torch.cat([shifted_input_ids, input_ids[..., :-1]], dim=-1) + else: + shifted_input_ids = input_ids.new_zeros(input_ids.shape) + shifted_input_ids[..., 1:] = input_ids[..., :-1].clone() + shifted_input_ids[..., 0] = decoder_start_token_id + + if pad_token_id is None: + raise ValueError("self.model.config.pad_token_id has to be defined.") + # replace possible -100 values in labels by `pad_token_id` + shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id) + + return shifted_input_ids + + +GPTSAN_JAPANESE_START_DOCSTRING = r""" + + The [GPTSAN-japanese](https://github.com/tanreinama/GPTSAN) model was proposed in General-purpose Swich transformer + based Japanese language model + + This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. + Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage + and behavior. + + Parameters: + config ([`GPTSanJapaneseConfig`]): Model configuration class with all the parameters of the model. + Initializing with a config file does not load the weights associated with the model, only the + configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. +""" + +GPTSAN_JAPANESE_INPUTS_DOCSTRING = r""" + Args: + input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): + Indices of input sequence tokens in the vocabulary. 
GPTSAN-japanese is a model that generates sentence + continuations or predicts tokens at mask positions. Special tokens required for inputs to the model are + automatically appended. + attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + token_type_ids (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): + An input that masks the Prefix part in the Prefix-LM input. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **prefix** input, + - 0 for tokens that are **not-prefix** input. + spout (`torch.Tensor` of shape `(batch_size, config.d_spout)`): + This vector is transformed through an 8-layer FFN and can be used instead of `past_key_values`. + past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): + Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. + + If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that + don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all + `decoder_input_ids` of shape `(batch_size, sequence_length)`. + head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): + Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: + use_cache (`bool`, *optional*): + If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see + `past_key_values`). + inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This + is useful if you want more control over how to convert `input_ids` indices into associated vectors than the + model's internal embedding lookup matrix. + decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*): + Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded + representation. If `past_key_values` is used, optionally only the last `decoder_inputs_embeds` have to be + input (see `past_key_values`). This is useful if you want more control over how to convert + `decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. + router_logits (`tuple(torch.FloatTensor)`, *optional*, returned when `output_router_logits=True` is passed or when `config.add_router_probs=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, sequence_length, num_experts)`. 
+ Router logits of the decoder model, useful to compute the auxiliary loss for Mixture of Experts models. +""" + + +@add_start_docstrings( + "The bare GPTSAN-japanese Model transformer outputting raw hidden-states without any specific head on top.", + GPTSAN_JAPANESE_START_DOCSTRING, +) +class GPTSanJapaneseModel(GPTSanJapanesePreTrainedModel): + def __init__(self, config: GPTSanJapaneseConfig): + super().__init__(config) + self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.d_model) + self.config = copy.deepcopy(config) + self.embed_tokens = nn.Embedding(config.vocab_size, config.d_model) + self.last_project = nn.Linear(config.d_model, config.d_model, bias=True) + self.act = ACT2FN["swish"] + + self.blocks = torch.nn.ModuleList([]) + for _ in range(config.num_switch_layers): + self.blocks.append(GPTSanJapaneseBlock(config)) + for _ in range(config.num_ext_layers): + self.blocks.append(GPTSanJapaneseBlock(config, ext_layer=True)) + + if config.num_ext_layers > 0: + self.extra_position_embeddings = nn.Embedding(config.max_position_embeddings, config.d_model) + + if config.d_spout: + spouts = [] + for _ in range(8): + spouts.append(nn.Linear(config.d_spout, config.d_spout, bias=False)) + spouts.append(nn.Tanh()) + spouts.append(nn.Linear(config.d_spout, config.num_layers * 2 * config.d_model, bias=False)) + self.spout = nn.Sequential(*spouts) + + self.post_init() + + def get_input_embeddings(self): + return self.embed_tokens + + def set_input_embeddings(self, new_embeddings): + self.embed_tokens = new_embeddings + + @add_start_docstrings_to_model_forward(GPTSAN_JAPANESE_INPUTS_DOCSTRING) + def forward( + self, + input_ids: Optional[torch.LongTensor] = None, + attention_mask: Optional[torch.FloatTensor] = None, + token_type_ids: Optional[torch.FloatTensor] = None, + spout: Optional[torch.FloatTensor] = None, + past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, + head_mask: Optional[torch.FloatTensor] = None, + use_cache: Optional[bool] = False, + inputs_embeds: Optional[torch.FloatTensor] = None, + decoder_inputs_embeds: Optional[torch.FloatTensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + output_router_logits: Optional[bool] = None, + num_precontext: Optional[torch.LongTensor] = None, + ) -> Union[MoEModelOutputWithPastAndCrossAttentions, Tuple[torch.FloatTensor]]: + r""" + num_precontext (`torch.LongTensor` of shape `(batch_size,1)`): + length of `hybrid` input tokens in the input. Tokens up to this length refer to both front and back like + BERT, tokens after that refer only to front like GPT. see also: + https://github.com/tanreinama/GPTSAN/blob/main/report/model.md + + Returns: + `MoEModelOutputWithPastAndCrossAttentions` or `tuple` if `return_dict` returns + MoEModelOutputWithPastAndCrossAttentions insted of tuple + """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + device = self.position_embeddings.weight.device + if input_ids is None: + input_ids = torch.zeros([1, 1]).int().to(device) # dummy for input_ids was None + num_pasts_contexts = 0 + num_batch = input_ids.shape[0] + pasts_or_spout_value = None + if past_key_values is not None: + num_pasts_contexts = past_key_values[0][0].shape[2] + elif self.config.d_spout and spout is not None: + # `spout` is a special input vector specific to GPTSAN + # This controls the output by projecting embedded information such as the class of sentences during learning. 
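+ # The `spout` vector of shape (batch, d_spout) is projected by the FFN `self.spout` (built in `__init__`)
+ # to (batch, num_layers * 2 * d_model) and reshaped below into one (key, value) pair per layer
+ # with a sequence length of 1, so it occupies a single "past" position.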
+ # It should passed instead of the first past_key_value. + # See the original GPTSAN repository for details + num_pasts_contexts += 1 + + # If there is an attention_mask, increase first one for spout + if self.config.d_spout and spout is not None and attention_mask is not None: + attention_mask_with_spout = torch.ones(num_batch, attention_mask.shape[1] + 1, device=device) + attention_mask_with_spout[:, 1:] -= 1 - attention_mask # 1st token should be spout + attention_mask = attention_mask_with_spout # update attention_mask + + if num_precontext is not None: + # `num_precontext` is the number of tokens that refer to each other in prefix-lm + # created per batch, so dimension of num_precontext should be [batch, 1] + if not ( + len(num_precontext.shape) == 2 and num_precontext.shape[1] == 1 + ): # num_precontext Should be [batch,1] + raise ValueError("num_precontext should be [batch, 1] size.") + num_precontext = torch.reshape(num_precontext, [-1]) + else: + num_precontext = torch.zeros([num_batch]).int().to(device) + + num_input_contexts = input_ids.shape[1] + num_output_contexts = num_input_contexts + num_pasts_contexts + + hidden_states = self.embed_tokens(input_ids) + + if past_key_values is not None: + pasts_or_spout_value = past_key_values + elif self.config.d_spout and spout is not None: + # Make vector from `spout` of GPTSAN to the same shape as past_key_values + pasts_or_spout_value = self.spout(spout) # projecting `spout` vector + pasts_or_spout_value = torch.reshape( + pasts_or_spout_value, + [ + num_batch, + self.config.num_layers, + 2, + self.config.num_heads, + num_pasts_contexts, + self.config.d_model // self.config.num_heads, + ], + ) + pasts_or_spout_value = torch.split(pasts_or_spout_value, [1] * self.config.num_layers, dim=1) + # make same shape as past_key_values + pasts_or_spout_value = tuple( + tuple([b.squeeze(1) for b in torch.split(a.squeeze(1), [1, 1], dim=1)]) for a in pasts_or_spout_value + ) + else: + pasts_or_spout_value = [None] * self.config.num_layers + + # Token position considering spout and pasts + token_position = torch.arange(num_input_contexts).to(device) + num_pasts_contexts + + if attention_mask is None: + attention_mask = torch.ones(num_batch, num_input_contexts, device=device) + + # positions for get position_embeddings + gather_position = ( + ( + torch.zeros((num_batch, self.config.d_model, num_input_contexts)).to(device) + + token_position.unsqueeze(0) + ) + .transpose(1, 2) + .long() + ) + # When padding with padding_side="left", zeros line up on the left side of attention_mask, so position_embeddings is shifted accordingly + gather_position -= (1 - attention_mask).argmin(dim=-1).unsqueeze(1).unsqueeze(2) + gather_position = torch.clip(gather_position, num_pasts_contexts, self.config.max_position_embeddings - 1) + + # attention_mask is applied per batch + for i in range(num_batch): + hidden_states[i] += torch.gather(self.position_embeddings.weight, dim=0, index=gather_position[i]) + + # Create a mask to be used when making the prefix Input length of Prefix-LM variable + causal_mask = ( + torch.tril(torch.ones((num_output_contexts, num_output_contexts), dtype=torch.uint8)) + .view(1, 1, num_output_contexts, num_output_contexts) + .to(device) + ) + prefix_lm_mask = causal_mask[:, :, -num_input_contexts:, :] + if token_type_ids is not None: + token_type_ids = token_type_ids.unsqueeze(1).unsqueeze(2) + prefix_lm_mask = ((prefix_lm_mask + token_type_ids) > 0).float() + # Marge prefix_lm_mask and attention_mask + extended_attention_mask = 
prefix_lm_mask * attention_mask.unsqueeze(1).unsqueeze(2) + + # Prepare head mask if needed + if head_mask is not None: + head_mask = self.get_head_mask( + head_mask, self.config.num_switch_layers + self.config.num_ext_layers + ) # n_layer x batch x n_heads x N x N + + # outputs + present_key_value_states = () if self.config.use_cache or use_cache else None + all_hidden_states = () if self.config.output_hidden_states or output_hidden_states else None + all_attentions = () if self.config.output_attentions or output_attentions else None + all_router_probs = () if self.config.output_router_logits or output_router_logits else None + + for layer, past in enumerate(pasts_or_spout_value): + if layer == self.config.num_switch_layers: + if self.config.num_ext_layers > 0: + # extra_position_embeddings are extra position embeddings that are only created when extending the model with code from the original GPTSAN repository. Not used in the default model. + # However, it is created when you create an additional layer and partially train only that location. + # Therefore, convert_gptsan_tf_checkpoint_to_pytorch.py is used when converting and loading models created in the original GPTSAN repository. + for i in range(num_batch): + hidden_states[i] += torch.gather( + self.extra_position_embeddings.weight, dim=0, index=gather_position[i] + ) + + output_router_tuple = ( + self.config.output_router_logits or output_router_logits + ) and layer < self.config.num_switch_layers + block_output = self.blocks[layer]( + hidden_states=hidden_states, + past_key_value=past, + attention_mask=extended_attention_mask, + head_mask=head_mask, + use_cache=self.config.use_cache or use_cache, + output_attentions=self.config.output_attentions or output_attentions, + output_router_tuple=output_router_tuple, + ) + + outpos = 0 + hidden_states = block_output[outpos] + if self.config.output_hidden_states or output_hidden_states: + all_hidden_states += (hidden_states,) + if self.config.use_cache or use_cache: + outpos += 1 + present = block_output[outpos] + present_key_value_states += (present,) + if self.config.output_attentions or output_attentions: + outpos += 1 + attention_probs = block_output[outpos] + all_attentions += (attention_probs,) + if output_router_tuple: + outpos += 1 + router_tuple = block_output[outpos] + all_router_probs.append(router_tuple[0]) + + hidden_states = self.last_project(hidden_states) + hidden_states = self.act(hidden_states) + + if self.config.output_hidden_states or output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + if not return_dict: + return tuple( + v + for v in [ + hidden_states, + present_key_value_states, + all_hidden_states, + all_attentions, + all_router_probs, + ] + if v is not None + ) + + return MoEModelOutputWithPastAndCrossAttentions( + last_hidden_state=hidden_states, + past_key_values=present_key_value_states, + hidden_states=all_hidden_states, + attentions=all_attentions, + router_probs=all_router_probs, + ) + + +@add_start_docstrings( + "The bare GPTSAN-japanese Model with a language modeling head.", + GPTSAN_JAPANESE_START_DOCSTRING, +) +class GPTSanJapaneseForConditionalGeneration(GPTSanJapanesePreTrainedModel): + _tied_weights_keys = ["lm_head.weight"] + + def __init__(self, config: GPTSanJapaneseConfig): + super().__init__(config) + self.model = GPTSanJapaneseModel(config) + self.register_buffer("final_logits_bias", torch.zeros([1, config.vocab_size])) + self.lm_head = nn.Linear(config.d_model, config.vocab_size, bias=False) + if not 
self.config.torchscript: + self.lm_head.weight = self.model.embed_tokens.weight + + @add_start_docstrings_to_model_forward(GPTSAN_JAPANESE_INPUTS_DOCSTRING) + def forward( + self, + input_ids: Optional[torch.LongTensor] = None, + attention_mask: Optional[torch.FloatTensor] = None, + token_type_ids: Optional[torch.FloatTensor] = None, + spout: Optional[torch.FloatTensor] = None, + past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, + head_mask: Optional[torch.FloatTensor] = None, + use_cache: Optional[bool] = False, + inputs_embeds: Optional[torch.FloatTensor] = None, + decoder_inputs_embeds: Optional[torch.FloatTensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + output_router_logits: Optional[bool] = None, + labels: Optional[torch.LongTensor] = None, + ) -> Union[Tuple[torch.FloatTensor], MoECausalLMOutputWithPast]: + r""" + labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Labels for computing the sequence classification loss. Indices should be in `[-100, 0, ..., + config.vocab_size - 1]`. All labels set to `-100` are ignored (masked), the loss is only computed for + labels in `[0, ..., config.vocab_size]` + + Returns: + `MoECausalLMOutputWithPast` or `tuple` if `return_dict` returns MoECausalLMOutputWithPast insted of tuple + + Example: + + Text Generation with regular LM Model + ```python + >>> from transformers import AutoModel, AutoTokenizer, trainer_utils + + >>> device = "cuda" + >>> model = AutoModel.from_pretrained("Tanrei/GPTSAN-japanese").to(device) + >>> tokenizer = AutoTokenizer.from_pretrained("Tanrei/GPTSAN-japanese") + >>> x_token = tokenizer("織田信長は、", return_tensors="pt") + >>> trainer_utils.set_seed(30) + >>> input_ids = x_token.input_ids.to(device) + >>> gen_token = model.generate(input_ids, max_new_tokens=50) + >>> tokenizer.decode(gen_token[0]) + "織田信長は、政治・軍事の中枢まで掌握した政治家であり、日本史上類を見ない驚異的な軍事侵攻を続け..." + ``` + + Text Generation with Prefix-LM Model + ```python + >>> from transformers import AutoModel, AutoTokenizer, trainer_utils + + >>> device = "cuda" + >>> model = AutoModel.from_pretrained("Tanrei/GPTSAN-japanese").to(device) + >>> tokenizer = AutoTokenizer.from_pretrained("Tanrei/GPTSAN-japanese") + >>> x_token = tokenizer("", prefix_text="織田信長は、", return_tensors="pt") + >>> trainer_utils.set_seed(30) + >>> input_ids = x_token.input_ids.to(device) + >>> token_type_ids = x_token.token_type_ids.to(device) + >>> gen_token = model.generate(input_ids, token_type_ids=token_type_ids, max_new_tokens=50) + >>> tokenizer.decode(gen_token[0]) + "織田信長は、政治・外交で数々の戦果を上げるが、1568年からは、いわゆる本能寺の変で細川晴元に暗殺される..." 
+ ``` + + Simultaneously Text Generation And Masked Language Model + ```python + >>> from transformers import AutoModel, AutoTokenizer, trainer_utils + + >>> device = "cuda" + >>> model = AutoModel.from_pretrained("Tanrei/GPTSAN-japanese").to(device) + >>> tokenizer = AutoTokenizer.from_pretrained("Tanrei/GPTSAN-japanese") + >>> masked_sentence = "武田信玄は、<|inputmask|>時代ファンならぜひ押さえ<|inputmask|>きたい名将の一人。" + >>> x_token = tokenizer("", prefix_text=masked_sentence, return_tensors="pt") + >>> trainer_utils.set_seed(30) + >>> input_ids = x_token.input_ids.to(device) + >>> token_type_ids = x_token.token_type_ids.to(device) + >>> out_lm_token = model.generate(input_ids, token_type_ids=token_type_ids, max_new_tokens=50) + >>> out_mlm_token = model(input_ids, token_type_ids=token_type_ids).logits.argmax(axis=-1) + >>> tokenizer.decode(out_mlm_token[0]) + "武田信玄は、戦国時代ファンならぜひ押さえておきたい名将の一人。" + + >>> tokenizer.decode(out_lm_token[0][input_ids.shape[1] :]) + "武田氏の三代に渡った武田家のひとり\n甲斐市に住む、日本史上最大の戦国大名。..." + ```""" + SEG_TOKEN = self.config.separator_token_id + use_cache = use_cache or self.config.use_cache + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + model_return_dict = True + num_precontext = None + if input_ids is not None: + num_batch = input_ids.shape[0] + num_precontext = torch.zeros([num_batch]).int().to(input_ids.device) + where_separators = torch.where(input_ids == SEG_TOKEN) + num_precontext[where_separators[0]] += where_separators[1] + num_precontext = num_precontext.unsqueeze(1) + + outputs = self.model( + input_ids, + attention_mask, + token_type_ids, + spout, + past_key_values, + head_mask, + use_cache, + inputs_embeds, + decoder_inputs_embeds, + output_attentions, + output_hidden_states, + model_return_dict, + output_router_logits, + num_precontext, + ) + + lm_logits = self.lm_head(outputs[0]) + if lm_logits.shape[-1] == self.final_logits_bias.shape[-1]: + lm_logits = lm_logits + self.final_logits_bias + + loss = None + z_loss = None + router_probs = None + aux_loss = None + if labels is not None: + # move labels to correct device to enable model parallelism + labels = labels.to(lm_logits.device) + + loss_fct = nn.CrossEntropyLoss(ignore_index=-100) + + if output_router_logits: + # Compute the router loss (z_loss + auxiliary loss) for each router in the encoder and decoder + router_logits, expert_indexes = self._unpack_router_logits(outputs.router_probs) + z_loss = router_z_loss_func(router_logits) + router_probs = nn.Softmax(dim=-1)(router_logits) + aux_loss = load_balancing_loss_func(router_probs, expert_indexes) + + loss = loss_fct(lm_logits.view(-1, lm_logits.size(-1)), labels.view(-1)) + + if not return_dict: + return tuple( + v + for v in [ + loss, + lm_logits, + outputs.past_key_values, + outputs.hidden_states, + outputs.router_probs, + z_loss, + aux_loss, + ] + if v is not None + ) + + return MoECausalLMOutputWithPast( + loss=loss, + logits=lm_logits, + past_key_values=outputs.past_key_values, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + router_logits=outputs.router_probs, + z_loss=z_loss, + aux_loss=aux_loss, + ) + + def prepare_inputs_for_generation( + self, + input_ids: torch.LongTensor, + attention_mask: torch.FloatTensor, + token_type_ids: Optional[torch.FloatTensor] = None, + spout: Optional[Union[List, torch.FloatTensor]] = None, + past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, + **kwargs, + ): + if isinstance(spout, list): + spout = torch.tensor(spout).float() + if input_ids is not None: + 
spout = spout.to(input_ids.device) + if past_key_values is not None: + return { + "input_ids": input_ids[:, -1:] if input_ids is not None else None, + "attention_mask": attention_mask, + "token_type_ids": token_type_ids[:, -1:] if token_type_ids is not None else None, + "spout": spout, + "past_key_values": past_key_values, + } + return { + "input_ids": input_ids, + "attention_mask": attention_mask, + "token_type_ids": token_type_ids, + "spout": spout, + "past_key_values": None, + } + + # Copied from transformers.models.switch_transformers.modeling_switch_transformers.SwitchTransformersForConditionalGeneration.prepare_decoder_input_ids_from_labels with SwitchTransformers->GPTSanJapanese + def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor): + return self._shift_right(labels) + + # Copied from transformers.models.mbart.modeling_mbart.MBartForConditionalGeneration.resize_token_embeddings with MBart->GPTSanJapanese + def resize_token_embeddings(self, new_num_tokens: int, pad_to_multiple_of: Optional[int] = None) -> nn.Embedding: + new_embeddings = super().resize_token_embeddings(new_num_tokens, pad_to_multiple_of) + self._resize_final_logits_bias(new_embeddings.weight.shape[0]) + return new_embeddings + + # Copied from transformers.models.mbart.modeling_mbart.MBartForConditionalGeneration._resize_final_logits_bias with MBart->GPTSanJapanese + def _resize_final_logits_bias(self, new_num_tokens: int) -> None: + old_num_tokens = self.final_logits_bias.shape[-1] + if new_num_tokens <= old_num_tokens: + new_bias = self.final_logits_bias[:, :new_num_tokens] + else: + extra_bias = torch.zeros((1, new_num_tokens - old_num_tokens), device=self.final_logits_bias.device) + new_bias = torch.cat([self.final_logits_bias, extra_bias], dim=1) + self.register_buffer("final_logits_bias", new_bias) + + def get_input_embeddings(self): + return self.model.get_input_embeddings() + + def set_input_embeddings(self, new_embeddings): + self.model.set_input_embeddings(new_embeddings) + + # Copied from transformers.models.switch_transformers.modeling_switch_transformers.SwitchTransformersForConditionalGeneration.set_output_embeddings with SwitchTransformers->GPTSanJapanese + def set_output_embeddings(self, new_embeddings): + self.lm_head = new_embeddings + + # Copied from transformers.models.switch_transformers.modeling_switch_transformers.SwitchTransformersForConditionalGeneration.get_output_embeddings with SwitchTransformers->GPTSanJapanese + def get_output_embeddings(self): + return self.lm_head + + # Copied from transformers.models.switch_transformers.modeling_switch_transformers.SwitchTransformersForConditionalGeneration._unpack_router_logits with SwitchTransformers->GPTSanJapanese + def _unpack_router_logits(self, router_outputs): + total_router_logits = [] + total_expert_indexes = [] + for router_output in router_outputs: + if len(router_output[0].shape) > 1: + router_logits, expert_indexes = router_output + total_router_logits.append(router_logits) + total_expert_indexes.append(expert_indexes) + return torch.cat(total_router_logits, dim=1), torch.cat(total_expert_indexes, dim=1) diff --git a/venv/lib/python3.10/site-packages/transformers/models/gptsan_japanese/tokenization_gptsan_japanese.py b/venv/lib/python3.10/site-packages/transformers/models/gptsan_japanese/tokenization_gptsan_japanese.py new file mode 100644 index 0000000000000000000000000000000000000000..f9b6d7fb5871c9c47ca7789eec220a7dbeeb8ec7 --- /dev/null +++ 
b/venv/lib/python3.10/site-packages/transformers/models/gptsan_japanese/tokenization_gptsan_japanese.py @@ -0,0 +1,526 @@ +# coding=utf-8 +# Copyright 2023 HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Tokenization classes for GPTSANJapanese.""" +import collections +import json +import os +import re +from typing import List, Optional, Tuple, Union + +import numpy as np + +from ...tokenization_utils import PreTrainedTokenizer +from ...tokenization_utils_base import ( + BatchEncoding, + PreTokenizedInput, + PreTokenizedInputPair, + TextInput, + TextInputPair, + TruncationStrategy, +) +from ...utils import PaddingStrategy, logging + + +logger = logging.get_logger(__name__) + +VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "emoji_file": "emoji.json"} + + +def load_vocab_and_emoji(vocab_file, emoji_file): + """Loads a vocabulary file and emoji file into a dictionary.""" + with open(emoji_file, "r", encoding="utf-8") as f: + emoji = json.loads(f.read()) + + vocab = collections.OrderedDict() + raw_vocab = collections.OrderedDict() + ids_to_tokens = collections.OrderedDict() + with open(vocab_file, "r", encoding="utf-8") as f: + token = f.readlines() + token = [[t.rstrip("\n")] if (t == ",\n" or "," not in t) else t.rstrip("\n").split(",") for t in token] + for idx, b in enumerate(token): + ids_to_tokens[idx] = b + raw_vocab[",".join(b)] = idx + for wd in b: + vocab[wd] = idx + + return vocab, raw_vocab, ids_to_tokens, emoji + + +class GPTSanJapaneseTokenizer(PreTrainedTokenizer): + """ + This tokenizer is based on GPTNeoXJapaneseTokenizer and has the following modifications + - Decoding byte0~byte255 tokens correctly + - Added bagofword token handling + - Return token_type_ids for Prefix-LM model + The bagofword token represents a repetition of the previous token and is converted to 3 consecutive tokens when + decoding In addition, the original Japanese special Sub-Word-Encoding has been released in this repository + (https://github.com/tanreinama/Japanese-BPEEncoder_V2). The token_type_ids is a mask indicating the prefix input + position of the Prefix-LM model. To specify a prefix position, specify a prefix input for prefix_text, or specify a + sentence of the prefix part and the part after it as a text pair of batch input. 
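+ 
+ As an illustration of the byte-token handling (a minimal sketch; the byte values are arbitrary), tokens of the
+ form `<|byteNN|>` are reassembled into UTF-8 text when decoding:
+ 
+ ```python
+ >>> from transformers import GPTSanJapaneseTokenizer
+ 
+ >>> tokenizer = GPTSanJapaneseTokenizer.from_pretrained("Tanrei/GPTSAN-japanese")
+ >>> tokenizer.convert_tokens_to_string(["<|byte72|>", "<|byte105|>"])
+ 'Hi'
+ ```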
+ + Example: + + ```python + >>> from transformers import GPTSanJapaneseTokenizer + + >>> tokenizer = GPTSanJapaneseTokenizer.from_pretrained("Tanrei/GPTSAN-japanese") + >>> # You can confirm both 慶応 and 慶應 are encoded to 17750 + >>> tokenizer("吾輩は猫である🐯。実は慶応(慶應)大学出身")["input_ids"] + [35993, 35998, 34347, 31459, 30647, 31448, 25, 30659, 35729, 35676, 32417, 30647, 17750, 35589, 17750, 35590, 321, 1281] + + >>> # Both 慶応 and 慶應 are decoded to 慶応 + >>> tokenizer.decode(tokenizer("吾輩は猫である🐯。実は慶応(慶應)大学出身")["input_ids"]) + '吾輩は猫である🐯。実は慶応(慶応)大学出身' + ``` + + Example for Prefix-LM: + + ```python + >>> from transformers import GPTSanJapaneseTokenizer + + >>> tokenizer = GPTSanJapaneseTokenizer.from_pretrained("Tanrei/GPTSAN-japanese") + >>> tokenizer("実は慶応(慶應)大学出身", prefix_text="吾輩は猫である🐯。")["input_ids"] + [35993, 34347, 31459, 30647, 31448, 25, 30659, 35729, 35676, 35998, 32417, 30647, 17750, 35589, 17750, 35590, 321, 1281] + + >>> # Mask for Prefix-LM inputs + >>> tokenizer("実は慶応(慶應)大学出身", prefix_text="吾輩は猫である🐯。")["token_type_ids"] + [1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0] + ``` + + Example for batch encode: + + ```python + >>> from transformers import GPTSanJapaneseTokenizer + + >>> tokenizer = GPTSanJapaneseTokenizer.from_pretrained("Tanrei/GPTSAN-japanese") + >>> tokenizer([["武田信玄", "は、"], ["織田信長", "の配下の、"]], padding=True)["input_ids"] + [[35993, 35998, 8640, 25948, 35993, 35998, 30647, 35675, 35999, 35999], [35993, 35998, 10382, 9868, 35993, 35998, 30646, 9459, 30646, 35675]] + + >>> # Mask for Prefix-LM inputs + >>> tokenizer([["武田信玄", "は、"], ["織田信長", "の配下の、"]], padding=True)["token_type_ids"] + [[1, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 0, 0, 0, 0, 0, 0, 0, 0, 0]] + + >>> # Mask for padding + >>> tokenizer([["武田信玄", "は、"], ["織田信長", "の配下の、"]], padding=True)["attention_mask"] + [[1, 1, 1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] + ``` + + Args: + vocab_file (`str`): + File containing the vocabulary. + emoji_file (`str`): + File containing the emoji. + unk_token (`str`, *optional*, defaults to `"<|nottoken|>"`): + The token used for unknown charactor + pad_token (`str`, *optional*, defaults to `"<|separator|>"`): + The token used for padding + bos_token (`str`, *optional*, defaults to `"<|startoftext|>"`): + The beginning of sequence token. + eos_token (`str`, *optional*, defaults to `"<|endoftext|>"`): + The end of sequence token. + sep_token (`str`, *optional*, defaults to `"<|segmenter|>"`): + A special token to separate token to prefix part and general input part. + do_clean_text (`bool`, *optional*, defaults to `False`): + Whether or not to clean text for URL, EMAIL, TEL, Japanese DATE and Japanese PRICE. + """ + + vocab_files_names = VOCAB_FILES_NAMES + model_input_names = ["input_ids", "attention_mask", "token_type_ids"] + + def __init__( + self, + vocab_file, + emoji_file, + unk_token="<|nottoken|>", + pad_token="<|separator|>", + bos_token="<|startoftext|>", + eos_token="<|endoftext|>", + sep_token="<|segmenter|>", + do_clean_text=False, + **kwargs, + ): + if not os.path.isfile(vocab_file): + raise ValueError( + f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained" + " model use `tokenizer = GPTSanJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" + ) + if not os.path.isfile(emoji_file): + raise ValueError( + f"Can't find a emoji file at path '{emoji_file}'. 
To load the emoji information from a Google" + " pretrained model use `tokenizer = GPTSanJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" + ) + self.do_clean_text = do_clean_text + self.vocab, self.raw_vocab, self.ids_to_tokens, self.emoji = load_vocab_and_emoji(vocab_file, emoji_file) + self.subword_tokenizer = SubWordJapaneseTokenizer( + vocab=self.vocab, ids_to_tokens=self.ids_to_tokens, emoji=self.emoji + ) + + super().__init__( + unk_token=unk_token, + pad_token=pad_token, + bos_token=bos_token, + eos_token=eos_token, + sep_token=sep_token, + do_clean_text=do_clean_text, + **kwargs, + ) + + @property + # Copied from tokenization_gpt_neox_japanese.GPTNeoXJapaneseTokenizer.vocab_size + def vocab_size(self): + # self.vocab contains support for character fluctuation unique to Japanese, and has a large number of vocab + return len(self.raw_vocab) + + # Copied from tokenization_gpt_neox_japanese.GPTNeoXJapaneseTokenizer.get_vocab + def get_vocab(self): + return dict(self.raw_vocab, **self.added_tokens_encoder) + + # Copied from tokenization_gpt_neox_japanese.GPTNeoXJapaneseTokenizer._tokenize + def _tokenize(self, text): + return self.subword_tokenizer.tokenize(text, clean=self.do_clean_text) + + # Copied from tokenization_gpt_neox_japanese.GPTNeoXJapaneseTokenizer._convert_token_to_id + def _convert_token_to_id(self, token): + """Converts a token (str) in an id using the vocab.""" + return self.vocab.get(token, self.vocab.get(self.unk_token)) + + # Copied from tokenization_gpt_neox_japanese.GPTNeoXJapaneseTokenizer._convert_id_to_token + def _convert_id_to_token(self, index): + """Converts an index (integer) in a token (str) using the vocab.""" + return self.subword_tokenizer.convert_id_to_token(index) + + def convert_tokens_to_string(self, tokens): + """Converts a sequence of tokens (string) in a single string.""" + words = [] + byte_tokens = [] + for word in tokens: + if word[:6] == "<|byte" and word[-2:] == "|>": + byte_tokens.append(int(word[6:-2])) + else: + if len(byte_tokens) > 0: + words.append(bytearray(byte_tokens).decode("utf-8", errors="replace")) + byte_tokens = [] + if word[:7] == "<|emoji" and word[-2:] == "|>": + words.append(self.emoji["emoji_inv"][word]) + elif word == "": + words.append(" ") + elif word == "
": + words.append("\n") + elif word == "": + words.append("\t") + elif word == "": + words.append("▀") + elif word == "": + words.append("ǀ") + elif word == "": + words.append("‖") + elif word == "<|bagoftoken|>": + if len(words) > 0: + words.append(words[-1]) + words.append(words[-1]) + words.append(words[-1]) + elif word.startswith("<|") and word.endswith("|>"): + words.append("") + else: + words.append(word) + if len(byte_tokens) > 0: + words.append(bytearray(byte_tokens).decode("utf-8", errors="replace")) + text = "".join(words) + return text + + @property + def default_chat_template(self): + """ + A simple chat template that adds standard BOS, SEP and EOS tokens between messages while discarding role + information. + """ + logger.warning_once( + "\nNo chat template is defined for this tokenizer - using the default template " + f"for the {self.__class__.__name__} class. If the default is not appropriate for " + "your model, please set `tokenizer.chat_template` to an appropriate template. " + "See https://huggingface.co/docs/transformers/main/chat_templating for more information.\n" + ) + return ( + "{% for message in messages %}" + "{% if not loop.first %}{{ bos_token}}{% endif %}" + "{{ sep_token }}{{ message.content }} {{ eos_token }}" + "{% endfor %}" + ) + + # Copied from tokenization_gpt_neox_japanese.GPTNeoXJapaneseTokenizer.save_vocabulary + def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: + index = 0 + if os.path.isdir(save_directory): + vocab_file = os.path.join( + save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] + ) + emoji_file = os.path.join( + save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"] + ) + else: + vocab_file = ( + (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"] + ) + emoji_file = ( + (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"] + ) + with open(vocab_file, "w", encoding="utf-8") as writer: + for token_index, token in self.ids_to_tokens.items(): + if index != token_index: + logger.warning( + f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive." + " Please check that the vocabulary is not corrupted!" + ) + index = token_index + writer.write(",".join(token) + "\n") + index += 1 + with open(emoji_file, "w", encoding="utf-8") as writer: + json.dump(self.emoji, writer) + return vocab_file, emoji_file + + def create_token_type_ids_from_sequences( + self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None + ) -> List[int]: + # docstyle-ignore + """ + The tokenizer returns token_type_ids as separators between the Prefix part and the rest. + token_type_ids is 1 for the Prefix part and 0 for the rest of the token. 
+ + Example: + ```python + >>> from transformers import GPTSanJapaneseTokenizer + + >>> tokenizer = GPTSanJapaneseTokenizer.from_pretrained("Tanrei/GPTSAN-japanese") + >>> x_token = tokenizer("アイウエ") + >>> # input_ids: | SOT | SEG | ア | イ | ウ | エ | + >>> # token_type_ids: | 1 | 0 | 0 | 0 | 0 | 0 | + + >>> x_token = tokenizer("", prefix_text="アイウエ") + >>> # input_ids: | SOT | ア | イ | ウ | エ | SEG | + >>> # token_type_ids: | 1 | 1 | 1 | 1 | 1 | 0 | + + >>> x_token = tokenizer("ウエ", prefix_text="アイ") + >>> # input_ids: | SOT | ア | イ | SEG | ウ | エ | + >>> # token_type_ids: | 1 | 1 | 1 | 0 | 0 | 0 | + ```""" + prefix_len = 0 + if self.sep_token in self.vocab: + segid = self.vocab[self.sep_token] + if segid in token_ids_0: + prefix_len = token_ids_0.index(segid) + if token_ids_1 is None: + total_len = len(token_ids_0) + else: + total_len = len(token_ids_0 + token_ids_1) + return prefix_len * [1] + (total_len - prefix_len) * [0] + + def prepare_for_tokenization(self, text, prefix_text=None, add_sep_token=None, **kwargs): + # GPTSAN inserts extra SEP tokens in Prefix-LM in addition to SOT for text generation. + # SOT at the beginning of the text, and SEP at the separator between the Prefix part and the rest. + if add_sep_token is None: + add_sep_token = self.sep_token not in text # If insert un-prefix position explicitly + prepared = self.bos_token if self.bos_token in self.vocab else "" + prepared += prefix_text if prefix_text is not None else "" + if add_sep_token: + prepared += self.sep_token if self.sep_token in self.vocab else "" + prepared += text + return (prepared, kwargs) + + def _batch_encode_plus( + self, + batch_text_or_text_pairs: Union[ + List[TextInput], List[TextInputPair], List[PreTokenizedInput], List[PreTokenizedInputPair] + ], + add_special_tokens: bool = True, + padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, + truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE, + max_length: Optional[int] = None, + stride: int = 0, + is_split_into_words: bool = False, + pad_to_multiple_of: Optional[int] = None, + return_tensors: Optional[str] = None, + return_token_type_ids: Optional[bool] = None, + return_attention_mask: Optional[bool] = None, + return_overflowing_tokens: bool = False, + return_special_tokens_mask: bool = False, + return_offsets_mapping: bool = False, + return_length: bool = False, + verbose: bool = True, + ) -> BatchEncoding: + # This tokenizer converts input text pairs into Prefix input and subsequent input + if isinstance(batch_text_or_text_pairs[0], tuple) or isinstance(tuple(batch_text_or_text_pairs[0]), list): + # As a single text with an explicit un-prefix position + batch_prefix_texts = [] + for pref, txt in batch_text_or_text_pairs: + batch_prefix_texts.append(pref + self.sep_token + txt) + batch_text_or_text_pairs = batch_prefix_texts + + return super()._batch_encode_plus( + batch_text_or_text_pairs, + add_special_tokens, + padding_strategy, + truncation_strategy, + max_length, + stride, + is_split_into_words, + pad_to_multiple_of, + return_tensors, + return_token_type_ids, + return_attention_mask, + return_overflowing_tokens, + return_special_tokens_mask, + return_offsets_mapping, + return_length, + verbose, + ) + + +class SubWordJapaneseTokenizer(object): + """ + This tokenizer is based on GPTNeoXJapaneseTokenizer and has the following modifications + - Decoding byte0~byte255 tokens correctly + - Added bagofword token handling + + https://github.com/tanreinama/Japanese-BPEEncoder_V2 This tokenizer class is under MIT 
Lisence according to the + original repository. + + MIT License + + Copyright (c) 2020 tanreinama + + Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated + documentation files (the "Software"), to deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all copies or substantial portions of + the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO + THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE. + """ + + # Copied from tokenization_gpt_neox_japanese.SubWordJapaneseTokenizer.__init__ + def __init__(self, vocab, ids_to_tokens, emoji): + self.vocab = vocab # same as swe + self.ids_to_tokens = ids_to_tokens # same as bpe + self.emoji = emoji + self.maxlen = np.max([len(w) for w in self.vocab.keys()]) + self.content_repatter1 = re.compile(r"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)") + self.content_repatter2 = re.compile(r"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*") + self.content_repatter3 = re.compile(r"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}") + self.content_repatter4 = re.compile( + r"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*" + ) + self.content_repatter5 = re.compile( + r"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*" + ) + self.content_repatter6 = re.compile( + r"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*" + ) + keisen = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿" + blocks = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟" + self.content_trans1 = str.maketrans({k: "" for k in keisen + blocks}) + + # Copied from tokenization_gpt_neox_japanese.SubWordJapaneseTokenizer.__len__ + def __len__(self): + return len(self.ids_to_tokens) + + # Copied from tokenization_gpt_neox_japanese.SubWordJapaneseTokenizer.clean_text + def clean_text(self, content): + content = self.content_repatter1.sub("", content) + content = self.content_repatter2.sub("", content) + content = self.content_repatter3.sub("", content) + content = self.content_repatter4.sub("", content) + content = self.content_repatter5.sub("", content) + content = self.content_repatter6.sub("", content) + content = content.translate(self.content_trans1) + while "" in content: + content = content.replace("", "") + return content + + # Copied from tokenization_gpt_neox_japanese.SubWordJapaneseTokenizer.tokenize + def tokenize(self, text, clean=False): + text = text.replace(" ", "") + 
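+ # Spaces and newlines are normalized to the tokenizer's placeholder tokens ("<SP>", "<BR>")
+ # before sub-word matching, and dash variants are unified to the long-vowel mark "ー".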
text = text.replace("　", "<SP>")
+ text = text.replace("\r\n", "<BR>")
+ text = text.replace("\n", "<BR>")
+ text = text.replace("\r", "<BR>
") + text = text.replace("\t", "") + text = text.replace("—", "ー") + text = text.replace("−", "ー") + for k, v in self.emoji["emoji"].items(): + if k in text: + text = text.replace(k, v) + if clean: + text = self.clean_text(text) + + def check_simbol(x): + e = x.encode() + if len(x) == 1 and len(e) == 2: + c = (int(e[0]) << 8) + int(e[1]) + if ( + (c >= 0xC2A1 and c <= 0xC2BF) + or (c >= 0xC780 and c <= 0xC783) + or (c >= 0xCAB9 and c <= 0xCBBF) + or (c >= 0xCC80 and c <= 0xCDA2) + ): + return True + return False + + def checku2e(x): + e = x.encode() + if len(x) == 1 and len(e) == 3: + c = (int(e[0]) << 16) + (int(e[1]) << 8) + int(e[2]) + if c >= 0xE28080 and c <= 0xE2B07F: + return True + return False + + pos = 0 + result = [] + while pos < len(text): + end = min(len(text), pos + self.maxlen + 1) if text[pos] == "<" else pos + 3 + candidates = [] # (token_id, token, pos) + for e in range(end, pos, -1): + wd = text[pos:e] + if wd in self.vocab: + if wd[0] == "<" and len(wd) > 2: + candidates = [(self.vocab[wd], wd, e)] + break + else: + candidates.append((self.vocab[wd], wd, e)) + if len(candidates) > 0: + # the smallest token_id is adopted + _, wd, e = sorted(candidates, key=lambda x: x[0])[0] + result.append(wd) + pos = e + else: + end = pos + 1 + wd = text[pos:end] + if check_simbol(wd): + result.append("") + elif checku2e(wd): + result.append("") + else: + for i in wd.encode("utf-8"): + result.append("<|byte%d|>" % i) + pos = end + return result + + def convert_id_to_token(self, index): + return self.ids_to_tokens[index][0] diff --git a/venv/lib/python3.10/site-packages/transformers/models/marian/__init__.py b/venv/lib/python3.10/site-packages/transformers/models/marian/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..56f0a4e86afba2fc662d686fbe09daac2fee5081 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/marian/__init__.py @@ -0,0 +1,113 @@ +# Copyright 2020 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from typing import TYPE_CHECKING + +from ...utils import ( + OptionalDependencyNotAvailable, + _LazyModule, + is_flax_available, + is_sentencepiece_available, + is_tf_available, + is_tokenizers_available, + is_torch_available, +) + + +_import_structure = { + "configuration_marian": ["MARIAN_PRETRAINED_CONFIG_ARCHIVE_MAP", "MarianConfig", "MarianOnnxConfig"], +} + +try: + if not is_sentencepiece_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["tokenization_marian"] = ["MarianTokenizer"] + +try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["modeling_marian"] = [ + "MARIAN_PRETRAINED_MODEL_ARCHIVE_LIST", + "MarianForCausalLM", + "MarianModel", + "MarianMTModel", + "MarianPreTrainedModel", + ] + +try: + if not is_tf_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["modeling_tf_marian"] = ["TFMarianModel", "TFMarianMTModel", "TFMarianPreTrainedModel"] + +try: + if not is_flax_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["modeling_flax_marian"] = ["FlaxMarianModel", "FlaxMarianMTModel", "FlaxMarianPreTrainedModel"] + +if TYPE_CHECKING: + from .configuration_marian import MARIAN_PRETRAINED_CONFIG_ARCHIVE_MAP, MarianConfig, MarianOnnxConfig + + try: + if not is_sentencepiece_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .tokenization_marian import MarianTokenizer + + try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .modeling_marian import ( + MARIAN_PRETRAINED_MODEL_ARCHIVE_LIST, + MarianForCausalLM, + MarianModel, + MarianMTModel, + MarianPreTrainedModel, + ) + + try: + if not is_tf_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .modeling_tf_marian import TFMarianModel, TFMarianMTModel, TFMarianPreTrainedModel + + try: + if not is_flax_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .modeling_flax_marian import FlaxMarianModel, FlaxMarianMTModel, FlaxMarianPreTrainedModel + +else: + import sys + + sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) diff --git a/venv/lib/python3.10/site-packages/transformers/models/marian/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/marian/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7c487a01f9457226d7389eb81a2afd1137820c64 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/marian/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/marian/__pycache__/configuration_marian.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/marian/__pycache__/configuration_marian.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..72422db92fd86ad83a375cdd8177089305c3e64a Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/marian/__pycache__/configuration_marian.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/transformers/models/marian/__pycache__/convert_marian_tatoeba_to_pytorch.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/marian/__pycache__/convert_marian_tatoeba_to_pytorch.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..11c8409885e82b4a3b4611b1d48b3af3a59f4eb2 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/marian/__pycache__/convert_marian_tatoeba_to_pytorch.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/marian/__pycache__/convert_marian_to_pytorch.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/marian/__pycache__/convert_marian_to_pytorch.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5f362859df4e6905a8d7c5d139837b65b510e87c Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/marian/__pycache__/convert_marian_to_pytorch.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/marian/__pycache__/modeling_flax_marian.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/marian/__pycache__/modeling_flax_marian.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..89c3e1ce0d0ae6c8ff67dc762de995d3e9e49831 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/marian/__pycache__/modeling_flax_marian.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/marian/__pycache__/modeling_marian.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/marian/__pycache__/modeling_marian.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bf0348d03d54af90bda764c678279caf2ca43810 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/marian/__pycache__/modeling_marian.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/marian/__pycache__/modeling_tf_marian.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/marian/__pycache__/modeling_tf_marian.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0ee7fef25038f239f6f36cf92d5463da68be76c1 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/marian/__pycache__/modeling_tf_marian.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/marian/__pycache__/tokenization_marian.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/marian/__pycache__/tokenization_marian.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..14f91451501bcb8ff3bd75ea8fbe0171d1647998 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/marian/__pycache__/tokenization_marian.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/marian/configuration_marian.py b/venv/lib/python3.10/site-packages/transformers/models/marian/configuration_marian.py new file mode 100644 index 0000000000000000000000000000000000000000..5921fde981be26cfaad725adbf91924551505a9d --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/marian/configuration_marian.py @@ -0,0 +1,390 @@ +# coding=utf-8 +# Copyright 2021 The Marian Team Authors and The HuggingFace Inc. team. All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" Marian model configuration""" +from collections import OrderedDict +from typing import Any, Mapping, Optional + +from ... import PreTrainedTokenizer +from ...configuration_utils import PretrainedConfig +from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast +from ...onnx.utils import compute_effective_axis_dimension +from ...utils import TensorType, is_torch_available, logging + + +logger = logging.get_logger(__name__) + + +class MarianConfig(PretrainedConfig): + r""" + This is the configuration class to store the configuration of a [`MarianModel`]. It is used to instantiate an + Marian model according to the specified arguments, defining the model architecture. Instantiating a configuration + with the defaults will yield a similar configuration to that of the Marian + [Helsinki-NLP/opus-mt-en-de](https://huggingface.co/Helsinki-NLP/opus-mt-en-de) architecture. + + Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the + documentation from [`PretrainedConfig`] for more information. + + + Args: + vocab_size (`int`, *optional*, defaults to 58101): + Vocabulary size of the Marian model. Defines the number of different tokens that can be represented by the + `inputs_ids` passed when calling [`MarianModel`] or [`TFMarianModel`]. + d_model (`int`, *optional*, defaults to 1024): + Dimensionality of the layers and the pooler layer. + encoder_layers (`int`, *optional*, defaults to 12): + Number of encoder layers. + decoder_layers (`int`, *optional*, defaults to 12): + Number of decoder layers. + encoder_attention_heads (`int`, *optional*, defaults to 16): + Number of attention heads for each attention layer in the Transformer encoder. + decoder_attention_heads (`int`, *optional*, defaults to 16): + Number of attention heads for each attention layer in the Transformer decoder. + decoder_ffn_dim (`int`, *optional*, defaults to 4096): + Dimensionality of the "intermediate" (often named feed-forward) layer in decoder. + encoder_ffn_dim (`int`, *optional*, defaults to 4096): + Dimensionality of the "intermediate" (often named feed-forward) layer in decoder. + activation_function (`str` or `function`, *optional*, defaults to `"gelu"`): + The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, + `"relu"`, `"silu"` and `"gelu_new"` are supported. + dropout (`float`, *optional*, defaults to 0.1): + The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. + attention_dropout (`float`, *optional*, defaults to 0.0): + The dropout ratio for the attention probabilities. + activation_dropout (`float`, *optional*, defaults to 0.0): + The dropout ratio for activations inside the fully connected layer. + max_position_embeddings (`int`, *optional*, defaults to 1024): + The maximum sequence length that this model might ever be used with. 
Typically set this to something large + just in case (e.g., 512 or 1024 or 2048). + init_std (`float`, *optional*, defaults to 0.02): + The standard deviation of the truncated_normal_initializer for initializing all weight matrices. + encoder_layerdrop (`float`, *optional*, defaults to 0.0): + The LayerDrop probability for the encoder. See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556) + for more details. + decoder_layerdrop (`float`, *optional*, defaults to 0.0): + The LayerDrop probability for the decoder. See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556) + for more details. + scale_embedding (`bool`, *optional*, defaults to `False`): + Scale embeddings by diving by sqrt(d_model). + use_cache (`bool`, *optional*, defaults to `True`): + Whether or not the model should return the last key/values attentions (not used by all models) + forced_eos_token_id (`int`, *optional*, defaults to 0): + The id of the token to force as the last generated token when `max_length` is reached. Usually set to + `eos_token_id`. + + Examples: + + ```python + >>> from transformers import MarianModel, MarianConfig + + >>> # Initializing a Marian Helsinki-NLP/opus-mt-en-de style configuration + >>> configuration = MarianConfig() + + >>> # Initializing a model from the Helsinki-NLP/opus-mt-en-de style configuration + >>> model = MarianModel(configuration) + + >>> # Accessing the model configuration + >>> configuration = model.config + ```""" + + model_type = "marian" + keys_to_ignore_at_inference = ["past_key_values"] + attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"} + + def __init__( + self, + vocab_size=58101, + decoder_vocab_size=None, + max_position_embeddings=1024, + encoder_layers=12, + encoder_ffn_dim=4096, + encoder_attention_heads=16, + decoder_layers=12, + decoder_ffn_dim=4096, + decoder_attention_heads=16, + encoder_layerdrop=0.0, + decoder_layerdrop=0.0, + use_cache=True, + is_encoder_decoder=True, + activation_function="gelu", + d_model=1024, + dropout=0.1, + attention_dropout=0.0, + activation_dropout=0.0, + init_std=0.02, + decoder_start_token_id=58100, + scale_embedding=False, + pad_token_id=58100, + eos_token_id=0, + forced_eos_token_id=0, + share_encoder_decoder_embeddings=True, + **kwargs, + ): + self.vocab_size = vocab_size + self.decoder_vocab_size = decoder_vocab_size or vocab_size + self.max_position_embeddings = max_position_embeddings + self.d_model = d_model + self.encoder_ffn_dim = encoder_ffn_dim + self.encoder_layers = encoder_layers + self.encoder_attention_heads = encoder_attention_heads + self.decoder_ffn_dim = decoder_ffn_dim + self.decoder_layers = decoder_layers + self.decoder_attention_heads = decoder_attention_heads + self.dropout = dropout + self.attention_dropout = attention_dropout + self.activation_dropout = activation_dropout + self.activation_function = activation_function + self.init_std = init_std + self.encoder_layerdrop = encoder_layerdrop + self.decoder_layerdrop = decoder_layerdrop + self.use_cache = use_cache + self.num_hidden_layers = encoder_layers + self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True + self.share_encoder_decoder_embeddings = share_encoder_decoder_embeddings + super().__init__( + pad_token_id=pad_token_id, + eos_token_id=eos_token_id, + is_encoder_decoder=is_encoder_decoder, + decoder_start_token_id=decoder_start_token_id, + forced_eos_token_id=forced_eos_token_id, + **kwargs, + ) + + +class MarianOnnxConfig(OnnxSeq2SeqConfigWithPast): + 
@property + # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs + def inputs(self) -> Mapping[str, Mapping[int, str]]: + if self.task in ["default", "seq2seq-lm"]: + common_inputs = OrderedDict( + [ + ("input_ids", {0: "batch", 1: "encoder_sequence"}), + ("attention_mask", {0: "batch", 1: "encoder_sequence"}), + ] + ) + + if self.use_past: + common_inputs["decoder_input_ids"] = {0: "batch"} + common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"} + else: + common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"} + common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"} + + if self.use_past: + self.fill_with_past_key_values_(common_inputs, direction="inputs") + elif self.task == "causal-lm": + # TODO: figure this case out. + common_inputs = OrderedDict( + [ + ("input_ids", {0: "batch", 1: "encoder_sequence"}), + ("attention_mask", {0: "batch", 1: "encoder_sequence"}), + ] + ) + if self.use_past: + num_encoder_layers, _ = self.num_layers + for i in range(num_encoder_layers): + common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"} + common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"} + else: + common_inputs = OrderedDict( + [ + ("input_ids", {0: "batch", 1: "encoder_sequence"}), + ("attention_mask", {0: "batch", 1: "encoder_sequence"}), + ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}), + ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}), + ] + ) + + return common_inputs + + @property + # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs + def outputs(self) -> Mapping[str, Mapping[int, str]]: + if self.task in ["default", "seq2seq-lm"]: + common_outputs = super().outputs + else: + common_outputs = super(OnnxConfigWithPast, self).outputs + if self.use_past: + num_encoder_layers, _ = self.num_layers + for i in range(num_encoder_layers): + common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"} + common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"} + return common_outputs + + def _generate_dummy_inputs_for_default_and_seq2seq_lm( + self, + tokenizer: PreTrainedTokenizer, + batch_size: int = -1, + seq_length: int = -1, + is_pair: bool = False, + framework: Optional[TensorType] = None, + ) -> Mapping[str, Any]: + encoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder( + tokenizer, batch_size, seq_length, is_pair, framework + ) + + # Generate decoder inputs + decoder_seq_length = seq_length if not self.use_past else 1 + decoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder( + tokenizer, batch_size, decoder_seq_length, is_pair, framework + ) + decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()} + common_inputs = dict(**encoder_inputs, **decoder_inputs) + + if self.use_past: + if not is_torch_available(): + raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.") + else: + import torch + batch, encoder_seq_length = common_inputs["input_ids"].shape + decoder_seq_length = common_inputs["decoder_input_ids"].shape[1] + num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads + encoder_shape = ( + batch, + num_encoder_attention_heads, + encoder_seq_length, + self._config.hidden_size // num_encoder_attention_heads, + ) + decoder_past_length = decoder_seq_length + 3 + decoder_shape = ( + batch, + 
num_decoder_attention_heads, + decoder_past_length, + self._config.hidden_size // num_decoder_attention_heads, + ) + + common_inputs["decoder_attention_mask"] = torch.cat( + [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1 + ) + + common_inputs["past_key_values"] = [] + # If the number of encoder and decoder layers are present in the model configuration, both are considered + num_encoder_layers, num_decoder_layers = self.num_layers + min_num_layers = min(num_encoder_layers, num_decoder_layers) + max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers + remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder" + + for _ in range(min_num_layers): + common_inputs["past_key_values"].append( + ( + torch.zeros(decoder_shape), + torch.zeros(decoder_shape), + torch.zeros(encoder_shape), + torch.zeros(encoder_shape), + ) + ) + # TODO: test this. + shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape + for _ in range(min_num_layers, max_num_layers): + common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape))) + return common_inputs + + def _generate_dummy_inputs_for_causal_lm( + self, + tokenizer: PreTrainedTokenizer, + batch_size: int = -1, + seq_length: int = -1, + is_pair: bool = False, + framework: Optional[TensorType] = None, + ) -> Mapping[str, Any]: + common_inputs = self._generate_dummy_inputs_for_encoder_and_decoder( + tokenizer, batch_size, seq_length, is_pair, framework + ) + + if self.use_past: + if not is_torch_available(): + raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.") + else: + import torch + batch, seqlen = common_inputs["input_ids"].shape + # Not using the same length for past_key_values + past_key_values_length = seqlen + 2 + num_encoder_layers, _ = self.num_layers + num_encoder_attention_heads, _ = self.num_attention_heads + past_shape = ( + batch, + num_encoder_attention_heads, + past_key_values_length, + self._config.hidden_size // num_encoder_attention_heads, + ) + + mask_dtype = common_inputs["attention_mask"].dtype + common_inputs["attention_mask"] = torch.cat( + [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1 + ) + common_inputs["past_key_values"] = [ + (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers) + ] + return common_inputs + + # Copied from BartOnnxConfig._generate_dummy_inputs_for_sequence_classification_and_question_answering + # We renamed this function because Marian models do not have a sequence classification or question answering head + def _generate_dummy_inputs_for_encoder_and_decoder( + self, + tokenizer: PreTrainedTokenizer, + batch_size: int = -1, + seq_length: int = -1, + is_pair: bool = False, + framework: Optional[TensorType] = None, + ) -> Mapping[str, Any]: + # Copied from OnnxConfig.generate_dummy_inputs + # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity. 
+ # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX + batch_size = compute_effective_axis_dimension( + batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0 + ) + + # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX + token_to_add = tokenizer.num_special_tokens_to_add(is_pair) + seq_length = compute_effective_axis_dimension( + seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add + ) + + # Generate dummy inputs according to compute batch and sequence + dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size + common_inputs = dict(tokenizer(dummy_input, return_tensors=framework)) + return common_inputs + + def generate_dummy_inputs( + self, + tokenizer: PreTrainedTokenizer, + batch_size: int = -1, + seq_length: int = -1, + is_pair: bool = False, + framework: Optional[TensorType] = None, + ) -> Mapping[str, Any]: + if self.task in ["default", "seq2seq-lm"]: + common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm( + tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework + ) + + else: + common_inputs = self._generate_dummy_inputs_for_causal_lm( + tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework + ) + + return common_inputs + + # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig._flatten_past_key_values_ + def _flatten_past_key_values_(self, flattened_output, name, idx, t): + if self.task in ["default", "seq2seq-lm"]: + flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t) + else: + flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_( + flattened_output, name, idx, t + ) + + @property + def atol_for_validation(self) -> float: + return 1e-4 diff --git a/venv/lib/python3.10/site-packages/transformers/models/marian/convert_marian_tatoeba_to_pytorch.py b/venv/lib/python3.10/site-packages/transformers/models/marian/convert_marian_tatoeba_to_pytorch.py new file mode 100644 index 0000000000000000000000000000000000000000..f6b548c2b07f460f7250f76067af728369bcf743 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/marian/convert_marian_tatoeba_to_pytorch.py @@ -0,0 +1,1324 @@ +# Copyright 2020 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
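The `MarianOnnxConfig` added above describes the dynamic axes and dummy inputs Marian contributes to the ONNX export machinery. The snippet below is a minimal, illustrative sketch (not part of the vendored file) of how its dummy-input generation might be exercised; it assumes the `Helsinki-NLP/opus-mt-en-de` checkpoint can be downloaded and that PyTorch is installed, and the shapes printed are for illustration only.

```python
from transformers import MarianConfig, MarianTokenizer
from transformers.models.marian.configuration_marian import MarianOnnxConfig
from transformers.utils import TensorType

# Illustrative checkpoint; any Marian checkpoint with a SentencePiece tokenizer should behave similarly.
config = MarianConfig.from_pretrained("Helsinki-NLP/opus-mt-en-de")
tokenizer = MarianTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-de")

onnx_config = MarianOnnxConfig(config, task="seq2seq-lm")
print(dict(onnx_config.inputs))  # dynamic axes for input_ids / attention_mask / decoder_* tensors

# Dummy tensors of the kind the exporter would trace with; passing -1 falls back to the
# fixed batch/sequence sizes computed by compute_effective_axis_dimension.
dummy = onnx_config.generate_dummy_inputs(
    tokenizer, batch_size=-1, seq_length=-1, framework=TensorType.PYTORCH
)
print({name: tuple(t.shape) for name, t in dummy.items()})
```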
+ +import argparse +import datetime +import json +import os +import re +from pathlib import Path +from typing import Tuple + +import yaml +from tqdm import tqdm + +from transformers.models.marian.convert_marian_to_pytorch import ( + FRONT_MATTER_TEMPLATE, + convert, + convert_opus_name_to_hf_name, + download_and_unzip, + get_system_metadata, +) + + +DEFAULT_REPO = "Tatoeba-Challenge" +DEFAULT_MODEL_DIR = os.path.join(DEFAULT_REPO, "models") +LANG_CODE_URL = "https://datahub.io/core/language-codes/r/language-codes-3b2.csv" +ISO_URL = "https://cdn-datasets.huggingface.co/language_codes/iso-639-3.csv" +ISO_PATH = "lang_code_data/iso-639-3.csv" +LANG_CODE_PATH = "lang_code_data/language-codes-3b2.csv" +TATOEBA_MODELS_URL = "https://object.pouta.csc.fi/Tatoeba-MT-models" + + +class TatoebaConverter: + """ + Convert Tatoeba-Challenge models to huggingface format. + + Steps: + + 1. Convert numpy state dict to hf format (same code as OPUS-MT-Train conversion). + 2. Rename opus model to huggingface format. This means replace each alpha3 code with an alpha2 code if a unique + one exists. e.g. aav-eng -> aav-en, heb-eng -> he-en + 3. Select the best model for a particular pair, parse the yml for it and write a model card. By default the + best model is the one listed first in released-model-results, but it's also possible to specify the most + recent one. + """ + + def __init__(self, save_dir="marian_converted"): + assert Path(DEFAULT_REPO).exists(), "need git clone git@github.com:Helsinki-NLP/Tatoeba-Challenge.git" + self.download_lang_info() + self.model_results = json.load(open("Tatoeba-Challenge/models/released-model-results.json")) + self.alpha3_to_alpha2 = {} + for line in open(ISO_PATH): + parts = line.split("\t") + if len(parts[0]) == 3 and len(parts[3]) == 2: + self.alpha3_to_alpha2[parts[0]] = parts[3] + for line in LANG_CODE_PATH: + parts = line.split(",") + if len(parts[0]) == 3 and len(parts[1]) == 2: + self.alpha3_to_alpha2[parts[0]] = parts[1] + self.model_card_dir = Path(save_dir) + self.tag2name = {} + for key, value in GROUP_MEMBERS.items(): + self.tag2name[key] = value[0] + + def convert_models(self, tatoeba_ids, dry_run=False): + models_to_convert = [self.parse_metadata(x) for x in tatoeba_ids] + save_dir = Path("marian_ckpt") + dest_dir = Path(self.model_card_dir) + dest_dir.mkdir(exist_ok=True) + for model in tqdm(models_to_convert): # k, prepro, download, test_set_url in tqdm(model_list): + if "SentencePiece" not in model["pre-processing"]: + print(f"Skipping {model['release']} because it doesn't appear to use SentencePiece") + continue + if not os.path.exists(save_dir / model["_name"]): + download_and_unzip(f"{TATOEBA_MODELS_URL}/{model['release']}", save_dir / model["_name"]) + # from convert_marian_to_pytorch + opus_language_groups_to_hf = convert_opus_name_to_hf_name + pair_name = opus_language_groups_to_hf(model["_name"]) + convert(save_dir / model["_name"], dest_dir / f"opus-mt-{pair_name}") + self.write_model_card(model, dry_run=dry_run) + + def expand_group_to_two_letter_codes(self, grp_name): + return [self.alpha3_to_alpha2.get(x, x) for x in GROUP_MEMBERS[grp_name][1]] + + def is_group(self, code, name): + return "languages" in name or len(GROUP_MEMBERS.get(code, [])) > 1 + + def get_tags(self, code, name): + if len(code) == 2: + assert "languages" not in name, f"{code}: {name}" + return [code] + elif self.is_group(code, name): + group = self.expand_group_to_two_letter_codes(code) + group.append(code) + return group + else: # zho-> zh + print(f"Three letter 
monolingual code: {code}") + return [code] + + def resolve_lang_code(self, src, tgt) -> Tuple[str, str]: + src_tags = self.get_tags(src, self.tag2name[src]) + tgt_tags = self.get_tags(tgt, self.tag2name[tgt]) + return src_tags, tgt_tags + + @staticmethod + def model_type_info_from_model_name(name): + info = {"_has_backtranslated_data": False} + if "1m" in name: + info["_data_per_pair"] = str(1e6) + if "2m" in name: + info["_data_per_pair"] = str(2e6) + if "4m" in name: + info["_data_per_pair"] = str(4e6) + if "+bt" in name: + info["_has_backtranslated_data"] = True + if "tuned4" in name: + info["_tuned"] = re.search(r"tuned4[^-]+", name).group() + return info + + def write_model_card(self, model_dict, dry_run=False) -> str: + """ + Construct card from data parsed from YAML and the model's name. upload command: aws s3 sync model_card_dir + s3://models.huggingface.co/bert/Helsinki-NLP/ --dryrun + """ + model_dir_url = f"{TATOEBA_MODELS_URL}/{model_dict['release']}" + long_pair = model_dict["_name"].split("-") + assert len(long_pair) == 2, f"got a translation pair {model_dict['_name']} that doesn't appear to be a pair" + short_src = self.alpha3_to_alpha2.get(long_pair[0], long_pair[0]) + short_tgt = self.alpha3_to_alpha2.get(long_pair[1], long_pair[1]) + model_dict["_hf_model_id"] = f"opus-mt-{short_src}-{short_tgt}" + + a3_src, a3_tgt = model_dict["_name"].split("-") + # opus_src_tags, opus_tgt_tags = a3_src.split("+"), a3_tgt.split("+") + + # This messy part tries to deal with language tags in multilingual models, possibly + # not all having three-letter codes + resolved_src_tags, resolved_tgt_tags = self.resolve_lang_code(a3_src, a3_tgt) + a2_src_tags, a2_tgt_tags = [], [] + for tag in resolved_src_tags: + if tag not in self.alpha3_to_alpha2: + a2_src_tags.append(tag) + for tag in resolved_tgt_tags: + if tag not in self.alpha3_to_alpha2: + a2_tgt_tags.append(tag) + + lang_tags = dedup(a2_src_tags + a2_tgt_tags) + src_multilingual, tgt_multilingual = (len(a2_src_tags) > 1), (len(a2_tgt_tags) > 1) + s, t = ",".join(a2_src_tags), ",".join(a2_tgt_tags) + + metadata = { + "hf_name": model_dict["_name"], + "source_languages": s, + "target_languages": t, + "opus_readme_url": f"{model_dir_url}/README.md", + "original_repo": "Tatoeba-Challenge", + "tags": ["translation"], + "languages": lang_tags, + } + lang_tags = l2front_matter(lang_tags) + + metadata["src_constituents"] = list(GROUP_MEMBERS[a3_src][1]) + metadata["tgt_constituents"] = list(GROUP_MEMBERS[a3_tgt][1]) + metadata["src_multilingual"] = src_multilingual + metadata["tgt_multilingual"] = tgt_multilingual + + backtranslated_data = "" + if model_dict["_has_backtranslated_data"]: + backtranslated_data = " with backtranslations" + + multilingual_data = "" + if "_data_per_pair" in model_dict: + multilingual_data = f"* data per pair in multilingual model: {model_dict['_data_per_pair']}\n" + + tuned = "" + if "_tuned" in model_dict: + tuned = f"* multilingual model tuned for: {model_dict['_tuned']}\n" + + model_base_filename = model_dict["release"].split("/")[-1] + download = f"* download original weights: [{model_base_filename}]({model_dir_url}/{model_dict['release']})\n" + + langtoken = "" + if tgt_multilingual: + langtoken = ( + "* a sentence-initial language token is required in the form of >>id<<" + "(id = valid, usually three-letter target language ID)\n" + ) + + metadata.update(get_system_metadata(DEFAULT_REPO)) + + scorestable = "" + for k, v in model_dict.items(): + if "scores" in k: + this_score_table = f"* {k}\n|Test 
set|score|\n|---|---|\n" + pairs = sorted(v.items(), key=lambda x: x[1], reverse=True) + for pair in pairs: + this_score_table += f"|{pair[0]}|{pair[1]}|\n" + scorestable += this_score_table + + datainfo = "" + if "training-data" in model_dict: + datainfo += "* Training data: \n" + for k, v in model_dict["training-data"].items(): + datainfo += f" * {str(k)}: {str(v)}\n" + if "validation-data" in model_dict: + datainfo += "* Validation data: \n" + for k, v in model_dict["validation-data"].items(): + datainfo += f" * {str(k)}: {str(v)}\n" + if "test-data" in model_dict: + datainfo += "* Test data: \n" + for k, v in model_dict["test-data"].items(): + datainfo += f" * {str(k)}: {str(v)}\n" + + testsetfilename = model_dict["release"].replace(".zip", ".test.txt") + testscoresfilename = model_dict["release"].replace(".zip", ".eval.txt") + testset = f"* test set translations file: [test.txt]({model_dir_url}/{testsetfilename})\n" + testscores = f"* test set scores file: [eval.txt]({model_dir_url}/{testscoresfilename})\n" + + # combine with Tatoeba markdown + readme_url = f"{TATOEBA_MODELS_URL}/{model_dict['_name']}/README.md" + extra_markdown = f""" +### {model_dict['_name']} + +* source language name: {self.tag2name[a3_src]} +* target language name: {self.tag2name[a3_tgt]} +* OPUS readme: [README.md]({readme_url}) +""" + + content = ( + f""" +* model: {model_dict['modeltype']} +* source language code{src_multilingual*'s'}: {', '.join(a2_src_tags)} +* target language code{tgt_multilingual*'s'}: {', '.join(a2_tgt_tags)} +* dataset: opus {backtranslated_data} +* release date: {model_dict['release-date']} +* pre-processing: {model_dict['pre-processing']} +""" + + multilingual_data + + tuned + + download + + langtoken + + datainfo + + testset + + testscores + + scorestable + ) + + content = FRONT_MATTER_TEMPLATE.format(lang_tags) + extra_markdown + content + + items = "\n".join([f"* {k}: {v}" for k, v in metadata.items()]) + sec3 = "\n### System Info: \n" + items + content += sec3 + if dry_run: + print("CONTENT:") + print(content) + print("METADATA:") + print(metadata) + return + sub_dir = self.model_card_dir / model_dict["_hf_model_id"] + sub_dir.mkdir(exist_ok=True) + dest = sub_dir / "README.md" + dest.open("w").write(content) + for k, v in metadata.items(): + if isinstance(v, datetime.date): + metadata[k] = datetime.datetime.strftime(v, "%Y-%m-%d") + with open(sub_dir / "metadata.json", "w", encoding="utf-8") as writeobj: + json.dump(metadata, writeobj) + + def download_lang_info(self): + Path(LANG_CODE_PATH).parent.mkdir(exist_ok=True) + import wget + + if not os.path.exists(ISO_PATH): + wget.download(ISO_URL, ISO_PATH) + if not os.path.exists(LANG_CODE_PATH): + wget.download(LANG_CODE_URL, LANG_CODE_PATH) + + def parse_metadata(self, model_name, repo_path=DEFAULT_MODEL_DIR, method="best"): + p = Path(repo_path) / model_name + + def url_to_name(url): + return url.split("/")[-1].split(".")[0] + + if model_name not in self.model_results: + # This is not a language pair, so model results are ambiguous, go by newest + method = "newest" + + if method == "best": + # Sort by how early they appear in released-models-results + results = [url_to_name(model["download"]) for model in self.model_results[model_name]] + ymls = [f for f in os.listdir(p) if f.endswith(".yml") and f[:-4] in results] + ymls.sort(key=lambda x: results.index(x[:-4])) + metadata = yaml.safe_load(open(p / ymls[0])) + metadata.update(self.model_type_info_from_model_name(ymls[0][:-4])) + elif method == "newest": + ymls = [f for f in 
os.listdir(p) if f.endswith(".yml")] + # Sort by date + ymls.sort( + key=lambda x: datetime.datetime.strptime(re.search(r"\d\d\d\d-\d\d?-\d\d?", x).group(), "%Y-%m-%d") + ) + metadata = yaml.safe_load(open(p / ymls[-1])) + metadata.update(self.model_type_info_from_model_name(ymls[-1][:-4])) + else: + raise NotImplementedError(f"Don't know argument method='{method}' to parse_metadata()") + metadata["_name"] = model_name + return metadata + + +GROUP_MEMBERS = { + # three letter code -> (group/language name, {constituents...} + # if this language is on the target side the constituents can be used as target language codes. + # if the language is on the source side they are supported natively without special codes. + "aav": ("Austro-Asiatic languages", {"hoc", "hoc_Latn", "kha", "khm", "khm_Latn", "mnw", "vie", "vie_Hani"}), + "afa": ( + "Afro-Asiatic languages", + { + "acm", + "afb", + "amh", + "apc", + "ara", + "arq", + "ary", + "arz", + "hau_Latn", + "heb", + "kab", + "mlt", + "rif_Latn", + "shy_Latn", + "som", + "thv", + "tir", + }, + ), + "afr": ("Afrikaans", {"afr"}), + "alv": ( + "Atlantic-Congo languages", + { + "ewe", + "fuc", + "fuv", + "ibo", + "kin", + "lin", + "lug", + "nya", + "run", + "sag", + "sna", + "swh", + "toi_Latn", + "tso", + "umb", + "wol", + "xho", + "yor", + "zul", + }, + ), + "ara": ("Arabic", {"afb", "apc", "apc_Latn", "ara", "ara_Latn", "arq", "arq_Latn", "arz"}), + "art": ( + "Artificial languages", + { + "afh_Latn", + "avk_Latn", + "dws_Latn", + "epo", + "ido", + "ido_Latn", + "ile_Latn", + "ina_Latn", + "jbo", + "jbo_Cyrl", + "jbo_Latn", + "ldn_Latn", + "lfn_Cyrl", + "lfn_Latn", + "nov_Latn", + "qya", + "qya_Latn", + "sjn_Latn", + "tlh_Latn", + "tzl", + "tzl_Latn", + "vol_Latn", + }, + ), + "aze": ("Azerbaijani", {"aze_Latn"}), + "bat": ("Baltic languages", {"lit", "lav", "prg_Latn", "ltg", "sgs"}), + "bel": ("Belarusian", {"bel", "bel_Latn"}), + "ben": ("Bengali", {"ben"}), + "bnt": ( + "Bantu languages", + {"kin", "lin", "lug", "nya", "run", "sna", "swh", "toi_Latn", "tso", "umb", "xho", "zul"}, + ), + "bul": ("Bulgarian", {"bul", "bul_Latn"}), + "cat": ("Catalan", {"cat"}), + "cau": ("Caucasian languages", {"abk", "kat", "che", "ady"}), + "ccs": ("South Caucasian languages", {"kat"}), + "ceb": ("Cebuano", {"ceb"}), + "cel": ("Celtic languages", {"gla", "gle", "bre", "cor", "glv", "cym"}), + "ces": ("Czech", {"ces"}), + "cpf": ("Creoles and pidgins, French‑based", {"gcf_Latn", "hat", "mfe"}), + "cpp": ( + "Creoles and pidgins, Portuguese-based", + {"zsm_Latn", "ind", "pap", "min", "tmw_Latn", "max_Latn", "zlm_Latn"}, + ), + "cus": ("Cushitic languages", {"som"}), + "dan": ("Danish", {"dan"}), + "deu": ("German", {"deu"}), + "dra": ("Dravidian languages", {"tam", "kan", "mal", "tel"}), + "ell": ("Modern Greek (1453-)", {"ell"}), + "eng": ("English", {"eng"}), + "epo": ("Esperanto", {"epo"}), + "est": ("Estonian", {"est"}), + "euq": ("Basque (family)", {"eus"}), + "eus": ("Basque", {"eus"}), + "fin": ("Finnish", {"fin"}), + "fiu": ( + "Finno-Ugrian languages", + { + "est", + "fin", + "fkv_Latn", + "hun", + "izh", + "kpv", + "krl", + "liv_Latn", + "mdf", + "mhr", + "myv", + "sma", + "sme", + "udm", + "vep", + "vro", + }, + ), + "fra": ("French", {"fra"}), + "gem": ( + "Germanic languages", + { + "afr", + "ang_Latn", + "dan", + "deu", + "eng", + "enm_Latn", + "fao", + "frr", + "fry", + "gos", + "got_Goth", + "gsw", + "isl", + "ksh", + "ltz", + "nds", + "nld", + "nno", + "nob", + "nob_Hebr", + "non_Latn", + "pdc", + "sco", + "stq", + "swe", + "swg", + "yid", + }, + 
), + "gle": ("Irish", {"gle"}), + "glg": ("Galician", {"glg"}), + "gmq": ("North Germanic languages", {"dan", "nob", "nob_Hebr", "swe", "isl", "nno", "non_Latn", "fao"}), + "gmw": ( + "West Germanic languages", + { + "afr", + "ang_Latn", + "deu", + "eng", + "enm_Latn", + "frr", + "fry", + "gos", + "gsw", + "ksh", + "ltz", + "nds", + "nld", + "pdc", + "sco", + "stq", + "swg", + "yid", + }, + ), + "grk": ("Greek languages", {"grc_Grek", "ell"}), + "hbs": ("Serbo-Croatian", {"hrv", "srp_Cyrl", "bos_Latn", "srp_Latn"}), + "heb": ("Hebrew", {"heb"}), + "hin": ("Hindi", {"hin"}), + "hun": ("Hungarian", {"hun"}), + "hye": ("Armenian", {"hye", "hye_Latn"}), + "iir": ( + "Indo-Iranian languages", + { + "asm", + "awa", + "ben", + "bho", + "gom", + "guj", + "hif_Latn", + "hin", + "jdt_Cyrl", + "kur_Arab", + "kur_Latn", + "mai", + "mar", + "npi", + "ori", + "oss", + "pan_Guru", + "pes", + "pes_Latn", + "pes_Thaa", + "pnb", + "pus", + "rom", + "san_Deva", + "sin", + "snd_Arab", + "tgk_Cyrl", + "tly_Latn", + "urd", + "zza", + }, + ), + "ilo": ("Iloko", {"ilo"}), + "inc": ( + "Indic languages", + { + "asm", + "awa", + "ben", + "bho", + "gom", + "guj", + "hif_Latn", + "hin", + "mai", + "mar", + "npi", + "ori", + "pan_Guru", + "pnb", + "rom", + "san_Deva", + "sin", + "snd_Arab", + "urd", + }, + ), + "ine": ( + "Indo-European languages", + { + "afr", + "afr_Arab", + "aln", + "ang_Latn", + "arg", + "asm", + "ast", + "awa", + "bel", + "bel_Latn", + "ben", + "bho", + "bjn", + "bos_Latn", + "bre", + "bul", + "bul_Latn", + "cat", + "ces", + "cor", + "cos", + "csb_Latn", + "cym", + "dan", + "deu", + "dsb", + "egl", + "ell", + "eng", + "enm_Latn", + "ext", + "fao", + "fra", + "frm_Latn", + "frr", + "fry", + "gcf_Latn", + "gla", + "gle", + "glg", + "glv", + "gom", + "gos", + "got_Goth", + "grc_Grek", + "gsw", + "guj", + "hat", + "hif_Latn", + "hin", + "hrv", + "hsb", + "hye", + "hye_Latn", + "ind", + "isl", + "ita", + "jdt_Cyrl", + "ksh", + "kur_Arab", + "kur_Latn", + "lad", + "lad_Latn", + "lat_Grek", + "lat_Latn", + "lav", + "lij", + "lit", + "lld_Latn", + "lmo", + "ltg", + "ltz", + "mai", + "mar", + "max_Latn", + "mfe", + "min", + "mkd", + "mwl", + "nds", + "nld", + "nno", + "nob", + "nob_Hebr", + "non_Latn", + "npi", + "oci", + "ori", + "orv_Cyrl", + "oss", + "pan_Guru", + "pap", + "pcd", + "pdc", + "pes", + "pes_Latn", + "pes_Thaa", + "pms", + "pnb", + "pol", + "por", + "prg_Latn", + "pus", + "roh", + "rom", + "ron", + "rue", + "rus", + "rus_Latn", + "san_Deva", + "scn", + "sco", + "sgs", + "sin", + "slv", + "snd_Arab", + "spa", + "sqi", + "srd", + "srp_Cyrl", + "srp_Latn", + "stq", + "swe", + "swg", + "tgk_Cyrl", + "tly_Latn", + "tmw_Latn", + "ukr", + "urd", + "vec", + "wln", + "yid", + "zlm_Latn", + "zsm_Latn", + "zza", + }, + ), + "isl": ("Icelandic", {"isl"}), + "ita": ("Italian", {"ita"}), + "itc": ( + "Italic languages", + { + "arg", + "ast", + "bjn", + "cat", + "cos", + "egl", + "ext", + "fra", + "frm_Latn", + "gcf_Latn", + "glg", + "hat", + "ind", + "ita", + "lad", + "lad_Latn", + "lat_Grek", + "lat_Latn", + "lij", + "lld_Latn", + "lmo", + "max_Latn", + "mfe", + "min", + "mwl", + "oci", + "pap", + "pcd", + "pms", + "por", + "roh", + "ron", + "scn", + "spa", + "srd", + "tmw_Latn", + "vec", + "wln", + "zlm_Latn", + "zsm_Latn", + }, + ), + "jpn": ("Japanese", {"jpn", "jpn_Bopo", "jpn_Hang", "jpn_Hani", "jpn_Hira", "jpn_Kana", "jpn_Latn", "jpn_Yiii"}), + "jpx": ("Japanese (family)", {"jpn"}), + "kat": ("Georgian", {"kat"}), + "kor": ("Korean", {"kor_Hani", "kor_Hang", "kor_Latn", "kor"}), + "lav": 
("Latvian", {"lav"}), + "lit": ("Lithuanian", {"lit"}), + "mkd": ("Macedonian", {"mkd"}), + "mkh": ("Mon-Khmer languages", {"vie_Hani", "mnw", "vie", "kha", "khm_Latn", "khm"}), + "msa": ("Malay (macrolanguage)", {"zsm_Latn", "ind", "max_Latn", "zlm_Latn", "min"}), + "mul": ( + "Multiple languages", + { + "abk", + "acm", + "ady", + "afb", + "afh_Latn", + "afr", + "akl_Latn", + "aln", + "amh", + "ang_Latn", + "apc", + "ara", + "arg", + "arq", + "ary", + "arz", + "asm", + "ast", + "avk_Latn", + "awa", + "aze_Latn", + "bak", + "bam_Latn", + "bel", + "bel_Latn", + "ben", + "bho", + "bod", + "bos_Latn", + "bre", + "brx", + "brx_Latn", + "bul", + "bul_Latn", + "cat", + "ceb", + "ces", + "cha", + "che", + "chr", + "chv", + "cjy_Hans", + "cjy_Hant", + "cmn", + "cmn_Hans", + "cmn_Hant", + "cor", + "cos", + "crh", + "crh_Latn", + "csb_Latn", + "cym", + "dan", + "deu", + "dsb", + "dtp", + "dws_Latn", + "egl", + "ell", + "enm_Latn", + "epo", + "est", + "eus", + "ewe", + "ext", + "fao", + "fij", + "fin", + "fkv_Latn", + "fra", + "frm_Latn", + "frr", + "fry", + "fuc", + "fuv", + "gan", + "gcf_Latn", + "gil", + "gla", + "gle", + "glg", + "glv", + "gom", + "gos", + "got_Goth", + "grc_Grek", + "grn", + "gsw", + "guj", + "hat", + "hau_Latn", + "haw", + "heb", + "hif_Latn", + "hil", + "hin", + "hnj_Latn", + "hoc", + "hoc_Latn", + "hrv", + "hsb", + "hun", + "hye", + "iba", + "ibo", + "ido", + "ido_Latn", + "ike_Latn", + "ile_Latn", + "ilo", + "ina_Latn", + "ind", + "isl", + "ita", + "izh", + "jav", + "jav_Java", + "jbo", + "jbo_Cyrl", + "jbo_Latn", + "jdt_Cyrl", + "jpn", + "kab", + "kal", + "kan", + "kat", + "kaz_Cyrl", + "kaz_Latn", + "kek_Latn", + "kha", + "khm", + "khm_Latn", + "kin", + "kir_Cyrl", + "kjh", + "kpv", + "krl", + "ksh", + "kum", + "kur_Arab", + "kur_Latn", + "lad", + "lad_Latn", + "lao", + "lat_Latn", + "lav", + "ldn_Latn", + "lfn_Cyrl", + "lfn_Latn", + "lij", + "lin", + "lit", + "liv_Latn", + "lkt", + "lld_Latn", + "lmo", + "ltg", + "ltz", + "lug", + "lzh", + "lzh_Hans", + "mad", + "mah", + "mai", + "mal", + "mar", + "max_Latn", + "mdf", + "mfe", + "mhr", + "mic", + "min", + "mkd", + "mlg", + "mlt", + "mnw", + "moh", + "mon", + "mri", + "mwl", + "mww", + "mya", + "myv", + "nan", + "nau", + "nav", + "nds", + "niu", + "nld", + "nno", + "nob", + "nob_Hebr", + "nog", + "non_Latn", + "nov_Latn", + "npi", + "nya", + "oci", + "ori", + "orv_Cyrl", + "oss", + "ota_Arab", + "ota_Latn", + "pag", + "pan_Guru", + "pap", + "pau", + "pdc", + "pes", + "pes_Latn", + "pes_Thaa", + "pms", + "pnb", + "pol", + "por", + "ppl_Latn", + "prg_Latn", + "pus", + "quc", + "qya", + "qya_Latn", + "rap", + "rif_Latn", + "roh", + "rom", + "ron", + "rue", + "run", + "rus", + "sag", + "sah", + "san_Deva", + "scn", + "sco", + "sgs", + "shs_Latn", + "shy_Latn", + "sin", + "sjn_Latn", + "slv", + "sma", + "sme", + "smo", + "sna", + "snd_Arab", + "som", + "spa", + "sqi", + "srp_Cyrl", + "srp_Latn", + "stq", + "sun", + "swe", + "swg", + "swh", + "tah", + "tam", + "tat", + "tat_Arab", + "tat_Latn", + "tel", + "tet", + "tgk_Cyrl", + "tha", + "tir", + "tlh_Latn", + "tly_Latn", + "tmw_Latn", + "toi_Latn", + "ton", + "tpw_Latn", + "tso", + "tuk", + "tuk_Latn", + "tur", + "tvl", + "tyv", + "tzl", + "tzl_Latn", + "udm", + "uig_Arab", + "uig_Cyrl", + "ukr", + "umb", + "urd", + "uzb_Cyrl", + "uzb_Latn", + "vec", + "vie", + "vie_Hani", + "vol_Latn", + "vro", + "war", + "wln", + "wol", + "wuu", + "xal", + "xho", + "yid", + "yor", + "yue", + "yue_Hans", + "yue_Hant", + "zho", + "zho_Hans", + "zho_Hant", + "zlm_Latn", + "zsm_Latn", + "zul", + 
"zza", + }, + ), + "nic": ( + "Niger-Kordofanian languages", + { + "bam_Latn", + "ewe", + "fuc", + "fuv", + "ibo", + "kin", + "lin", + "lug", + "nya", + "run", + "sag", + "sna", + "swh", + "toi_Latn", + "tso", + "umb", + "wol", + "xho", + "yor", + "zul", + }, + ), + "nld": ("Dutch", {"nld"}), + "nor": ("Norwegian", {"nob", "nno"}), + "phi": ("Philippine languages", {"ilo", "akl_Latn", "war", "hil", "pag", "ceb"}), + "pol": ("Polish", {"pol"}), + "por": ("Portuguese", {"por"}), + "pqe": ( + "Eastern Malayo-Polynesian languages", + {"fij", "gil", "haw", "mah", "mri", "nau", "niu", "rap", "smo", "tah", "ton", "tvl"}, + ), + "roa": ( + "Romance languages", + { + "arg", + "ast", + "cat", + "cos", + "egl", + "ext", + "fra", + "frm_Latn", + "gcf_Latn", + "glg", + "hat", + "ind", + "ita", + "lad", + "lad_Latn", + "lij", + "lld_Latn", + "lmo", + "max_Latn", + "mfe", + "min", + "mwl", + "oci", + "pap", + "pms", + "por", + "roh", + "ron", + "scn", + "spa", + "tmw_Latn", + "vec", + "wln", + "zlm_Latn", + "zsm_Latn", + }, + ), + "ron": ("Romanian", {"ron"}), + "run": ("Rundi", {"run"}), + "rus": ("Russian", {"rus"}), + "sal": ("Salishan languages", {"shs_Latn"}), + "sem": ("Semitic languages", {"acm", "afb", "amh", "apc", "ara", "arq", "ary", "arz", "heb", "mlt", "tir"}), + "sla": ( + "Slavic languages", + { + "bel", + "bel_Latn", + "bos_Latn", + "bul", + "bul_Latn", + "ces", + "csb_Latn", + "dsb", + "hrv", + "hsb", + "mkd", + "orv_Cyrl", + "pol", + "rue", + "rus", + "slv", + "srp_Cyrl", + "srp_Latn", + "ukr", + }, + ), + "slv": ("Slovenian", {"slv"}), + "spa": ("Spanish", {"spa"}), + "swe": ("Swedish", {"swe"}), + "taw": ("Tai", {"lao", "tha"}), + "tgl": ("Tagalog", {"tgl_Latn"}), + "tha": ("Thai", {"tha"}), + "trk": ( + "Turkic languages", + { + "aze_Latn", + "bak", + "chv", + "crh", + "crh_Latn", + "kaz_Cyrl", + "kaz_Latn", + "kir_Cyrl", + "kjh", + "kum", + "ota_Arab", + "ota_Latn", + "sah", + "tat", + "tat_Arab", + "tat_Latn", + "tuk", + "tuk_Latn", + "tur", + "tyv", + "uig_Arab", + "uig_Cyrl", + "uzb_Cyrl", + "uzb_Latn", + }, + ), + "tur": ("Turkish", {"tur"}), + "ukr": ("Ukrainian", {"ukr"}), + "urd": ("Urdu", {"urd"}), + "urj": ( + "Uralic languages", + { + "est", + "fin", + "fkv_Latn", + "hun", + "izh", + "kpv", + "krl", + "liv_Latn", + "mdf", + "mhr", + "myv", + "sma", + "sme", + "udm", + "vep", + "vro", + }, + ), + "vie": ("Vietnamese", {"vie", "vie_Hani"}), + "war": ("Waray (Philippines)", {"war"}), + "zho": ( + "Chinese", + { + "cjy_Hans", + "cjy_Hant", + "cmn", + "cmn_Bopo", + "cmn_Hang", + "cmn_Hani", + "cmn_Hans", + "cmn_Hant", + "cmn_Hira", + "cmn_Kana", + "cmn_Latn", + "cmn_Yiii", + "gan", + "hak_Hani", + "lzh", + "lzh_Bopo", + "lzh_Hang", + "lzh_Hani", + "lzh_Hans", + "lzh_Hira", + "lzh_Kana", + "lzh_Yiii", + "nan", + "nan_Hani", + "wuu", + "wuu_Bopo", + "wuu_Hani", + "wuu_Latn", + "yue", + "yue_Bopo", + "yue_Hang", + "yue_Hani", + "yue_Hans", + "yue_Hant", + "yue_Hira", + "yue_Kana", + "zho", + "zho_Hans", + "zho_Hant", + }, + ), + "zle": ("East Slavic languages", {"bel", "orv_Cyrl", "bel_Latn", "rus", "ukr", "rue"}), + "zls": ("South Slavic languages", {"bos_Latn", "bul", "bul_Latn", "hrv", "mkd", "slv", "srp_Cyrl", "srp_Latn"}), + "zlw": ("West Slavic languages", {"csb_Latn", "dsb", "hsb", "pol", "ces"}), +} + + +def l2front_matter(langs): + return "".join(f"- {l}\n" for l in langs) + + +def dedup(lst): + """Preservers order""" + new_lst = [] + for item in lst: + if not item or item in new_lst: + continue + else: + new_lst.append(item) + return new_lst + + +if __name__ == 
"__main__": + parser = argparse.ArgumentParser() + parser.add_argument( + "-m", "--models", action="append", help=" Set flag", required=True, nargs="+", dest="models" + ) + parser.add_argument("-save_dir", "--save_dir", default="marian_converted", help="where to save converted models") + args = parser.parse_args() + resolver = TatoebaConverter(save_dir=args.save_dir) + resolver.convert_models(args.models[0]) diff --git a/venv/lib/python3.10/site-packages/transformers/models/marian/convert_marian_to_pytorch.py b/venv/lib/python3.10/site-packages/transformers/models/marian/convert_marian_to_pytorch.py new file mode 100644 index 0000000000000000000000000000000000000000..79afd50955ddd180c35d53a45faac6e73e902472 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/marian/convert_marian_to_pytorch.py @@ -0,0 +1,708 @@ +# Copyright 2020 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import argparse +import json +import os +import socket +import time +import warnings +from pathlib import Path +from typing import Dict, List, Union +from zipfile import ZipFile + +import numpy as np +import torch +from huggingface_hub.hf_api import list_models +from torch import nn +from tqdm import tqdm + +from transformers import MarianConfig, MarianMTModel, MarianTokenizer + + +def remove_suffix(text: str, suffix: str): + if text.endswith(suffix): + return text[: -len(suffix)] + return text # or whatever + + +def remove_prefix(text: str, prefix: str): + if text.startswith(prefix): + return text[len(prefix) :] + return text # or whatever + + +def convert_encoder_layer(opus_dict, layer_prefix: str, converter: dict): + sd = {} + for k in opus_dict: + if not k.startswith(layer_prefix): + continue + stripped = remove_prefix(k, layer_prefix) + v = opus_dict[k].T # besides embeddings, everything must be transposed. + sd[converter[stripped]] = torch.tensor(v).squeeze() + return sd + + +def load_layers_(layer_lst: nn.ModuleList, opus_state: dict, converter, is_decoder=False): + for i, layer in enumerate(layer_lst): + layer_tag = f"decoder_l{i + 1}_" if is_decoder else f"encoder_l{i + 1}_" + sd = convert_encoder_layer(opus_state, layer_tag, converter) + layer.load_state_dict(sd, strict=False) + + +def find_pretrained_model(src_lang: str, tgt_lang: str) -> List[str]: + """Find models that can accept src_lang as input and return tgt_lang as output.""" + prefix = "Helsinki-NLP/opus-mt-" + model_list = list_models() + model_ids = [x.modelId for x in model_list if x.modelId.startswith("Helsinki-NLP")] + src_and_targ = [ + remove_prefix(m, prefix).lower().split("-") for m in model_ids if "+" not in m + ] # + cant be loaded. 
+ matching = [f"{prefix}{a}-{b}" for (a, b) in src_and_targ if src_lang in a and tgt_lang in b] + return matching + + +def add_emb_entries(wemb, final_bias, n_special_tokens=1): + vsize, d_model = wemb.shape + embs_to_add = np.zeros((n_special_tokens, d_model)) + new_embs = np.concatenate([wemb, embs_to_add]) + bias_to_add = np.zeros((n_special_tokens, 1)) + new_bias = np.concatenate((final_bias, bias_to_add), axis=1) + return new_embs, new_bias + + +def _cast_yaml_str(v): + bool_dct = {"true": True, "false": False} + if not isinstance(v, str): + return v + elif v in bool_dct: + return bool_dct[v] + try: + return int(v) + except (TypeError, ValueError): + return v + + +def cast_marian_config(raw_cfg: Dict[str, str]) -> Dict: + return {k: _cast_yaml_str(v) for k, v in raw_cfg.items()} + + +CONFIG_KEY = "special:model.yml" + + +def load_config_from_state_dict(opus_dict): + import yaml + + cfg_str = "".join([chr(x) for x in opus_dict[CONFIG_KEY]]) + yaml_cfg = yaml.load(cfg_str[:-1], Loader=yaml.BaseLoader) + return cast_marian_config(yaml_cfg) + + +def find_model_file(dest_dir): # this one better + model_files = list(Path(dest_dir).glob("*.npz")) + if len(model_files) != 1: + raise ValueError(f"Found more than one model file: {model_files}") + model_file = model_files[0] + return model_file + + +# Group Names Logic: change long opus model names to something shorter, like opus-mt-en-ROMANCE +ROM_GROUP = ( + "fr+fr_BE+fr_CA+fr_FR+wa+frp+oc+ca+rm+lld+fur+lij+lmo+es+es_AR+es_CL+es_CO+es_CR+es_DO+es_EC+es_ES+es_GT" + "+es_HN+es_MX+es_NI+es_PA+es_PE+es_PR+es_SV+es_UY+es_VE+pt+pt_br+pt_BR+pt_PT+gl+lad+an+mwl+it+it_IT+co" + "+nap+scn+vec+sc+ro+la" +) +GROUPS = [ + ("cmn+cn+yue+ze_zh+zh_cn+zh_CN+zh_HK+zh_tw+zh_TW+zh_yue+zhs+zht+zh", "ZH"), + (ROM_GROUP, "ROMANCE"), + ("de+nl+fy+af+da+fo+is+no+nb+nn+sv", "NORTH_EU"), + ("da+fo+is+no+nb+nn+sv", "SCANDINAVIA"), + ("se+sma+smj+smn+sms", "SAMI"), + ("nb_NO+nb+nn_NO+nn+nog+no_nb+no", "NORWAY"), + ("ga+cy+br+gd+kw+gv", "CELTIC"), # https://en.wikipedia.org/wiki/Insular_Celtic_languages +] +GROUP_TO_OPUS_NAME = { + "opus-mt-ZH-de": "cmn+cn+yue+ze_zh+zh_cn+zh_CN+zh_HK+zh_tw+zh_TW+zh_yue+zhs+zht+zh-de", + "opus-mt-ZH-fi": "cmn+cn+yue+ze_zh+zh_cn+zh_CN+zh_HK+zh_tw+zh_TW+zh_yue+zhs+zht+zh-fi", + "opus-mt-ZH-sv": "cmn+cn+yue+ze_zh+zh_cn+zh_CN+zh_HK+zh_tw+zh_TW+zh_yue+zhs+zht+zh-sv", + "opus-mt-SCANDINAVIA-SCANDINAVIA": "da+fo+is+no+nb+nn+sv-da+fo+is+no+nb+nn+sv", + "opus-mt-NORTH_EU-NORTH_EU": "de+nl+fy+af+da+fo+is+no+nb+nn+sv-de+nl+fy+af+da+fo+is+no+nb+nn+sv", + "opus-mt-de-ZH": "de-cmn+cn+yue+ze_zh+zh_cn+zh_CN+zh_HK+zh_tw+zh_TW+zh_yue+zhs+zht+zh", + "opus-mt-en_el_es_fi-en_el_es_fi": "en+el+es+fi-en+el+es+fi", + "opus-mt-en-ROMANCE": ( + "en-fr+fr_BE+fr_CA+fr_FR+wa+frp+oc+ca+rm+lld+fur+lij+lmo+es+es_AR+es_CL+es_CO+es_CR+es_DO" + "+es_EC+es_ES+es_GT+es_HN+es_MX+es_NI+es_PA+es_PE+es_PR+es_SV+es_UY+es_VE+pt+pt_br+pt_BR" + "+pt_PT+gl+lad+an+mwl+it+it_IT+co+nap+scn+vec+sc+ro+la" + ), + "opus-mt-en-CELTIC": "en-ga+cy+br+gd+kw+gv", + "opus-mt-es-NORWAY": "es-nb_NO+nb+nn_NO+nn+nog+no_nb+no", + "opus-mt-fi_nb_no_nn_ru_sv_en-SAMI": "fi+nb+no+nn+ru+sv+en-se+sma+smj+smn+sms", + "opus-mt-fi-ZH": "fi-cmn+cn+yue+ze_zh+zh_cn+zh_CN+zh_HK+zh_tw+zh_TW+zh_yue+zhs+zht+zh", + "opus-mt-fi-NORWAY": "fi-nb_NO+nb+nn_NO+nn+nog+no_nb+no", + "opus-mt-ROMANCE-en": ( + "fr+fr_BE+fr_CA+fr_FR+wa+frp+oc+ca+rm+lld+fur+lij+lmo+es+es_AR+es_CL+es_CO+es_CR+es_DO" + "+es_EC+es_ES+es_GT+es_HN+es_MX+es_NI+es_PA+es_PE+es_PR+es_SV+es_UY+es_VE+pt+pt_br+pt_BR" + 
"+pt_PT+gl+lad+an+mwl+it+it_IT+co+nap+scn+vec+sc+ro+la-en" + ), + "opus-mt-CELTIC-en": "ga+cy+br+gd+kw+gv-en", + "opus-mt-sv-ZH": "sv-cmn+cn+yue+ze_zh+zh_cn+zh_CN+zh_HK+zh_tw+zh_TW+zh_yue+zhs+zht+zh", + "opus-mt-sv-NORWAY": "sv-nb_NO+nb+nn_NO+nn+nog+no_nb+no", +} +OPUS_GITHUB_URL = "https://github.com/Helsinki-NLP/OPUS-MT-train/blob/master/models/" +ORG_NAME = "Helsinki-NLP/" + + +def convert_opus_name_to_hf_name(x): + """For OPUS-MT-Train/ DEPRECATED""" + for substr, grp_name in GROUPS: + x = x.replace(substr, grp_name) + return x.replace("+", "_") + + +def convert_hf_name_to_opus_name(hf_model_name): + """ + Relies on the assumption that there are no language codes like pt_br in models that are not in GROUP_TO_OPUS_NAME. + """ + hf_model_name = remove_prefix(hf_model_name, ORG_NAME) + if hf_model_name in GROUP_TO_OPUS_NAME: + opus_w_prefix = GROUP_TO_OPUS_NAME[hf_model_name] + else: + opus_w_prefix = hf_model_name.replace("_", "+") + return remove_prefix(opus_w_prefix, "opus-mt-") + + +def get_system_metadata(repo_root): + import git + + return { + "helsinki_git_sha": git.Repo(path=repo_root, search_parent_directories=True).head.object.hexsha, + "transformers_git_sha": git.Repo(path=".", search_parent_directories=True).head.object.hexsha, + "port_machine": socket.gethostname(), + "port_time": time.strftime("%Y-%m-%d-%H:%M"), + } + + +# docstyle-ignore +FRONT_MATTER_TEMPLATE = """--- +language: +{} +tags: +- translation + +license: apache-2.0 +--- +""" +DEFAULT_REPO = "Tatoeba-Challenge" +DEFAULT_MODEL_DIR = os.path.join(DEFAULT_REPO, "models") + + +def write_model_card( + hf_model_name: str, + repo_root=DEFAULT_REPO, + save_dir=Path("marian_converted"), + dry_run=False, + extra_metadata={}, +) -> str: + """ + Copy the most recent model's readme section from opus, and add metadata. upload command: aws s3 sync model_card_dir + s3://models.huggingface.co/bert/Helsinki-NLP/ --dryrun + """ + import pandas as pd + + hf_model_name = remove_prefix(hf_model_name, ORG_NAME) + opus_name: str = convert_hf_name_to_opus_name(hf_model_name) + if repo_root not in ("OPUS-MT-train", "Tatoeba-Challenge"): + raise ValueError(f"Repos root is {repo_root}. Expected either OPUS-MT-train or Tatoeba-Challenge") + opus_readme_path = Path(repo_root).joinpath("models", opus_name, "README.md") + if not (opus_readme_path.exists()): + raise ValueError(f"Readme file {opus_readme_path} not found") + + opus_src, opus_tgt = [x.split("+") for x in opus_name.split("-")] + + readme_url = f"https://github.com/Helsinki-NLP/{repo_root}/tree/master/models/{opus_name}/README.md" + + s, t = ",".join(opus_src), ",".join(opus_tgt) + metadata = { + "hf_name": hf_model_name, + "source_languages": s, + "target_languages": t, + "opus_readme_url": readme_url, + "original_repo": repo_root, + "tags": ["translation"], + } + metadata.update(extra_metadata) + metadata.update(get_system_metadata(repo_root)) + + # combine with opus markdown + + extra_markdown = ( + f"### {hf_model_name}\n\n* source group: {metadata['src_name']} \n* target group: " + f"{metadata['tgt_name']} \n* OPUS readme: [{opus_name}]({readme_url})\n" + ) + + content = opus_readme_path.open().read() + content = content.split("\n# ")[-1] # Get the lowest level 1 header in the README -- the most recent model. 
+ splat = content.split("*")[2:] + print(splat[3]) + content = "*".join(splat) + content = ( + FRONT_MATTER_TEMPLATE.format(metadata["src_alpha2"]) + + extra_markdown + + "\n* " + + content.replace("download", "download original weights") + ) + + items = "\n\n".join([f"- {k}: {v}" for k, v in metadata.items()]) + sec3 = "\n### System Info: \n" + items + content += sec3 + if dry_run: + return content, metadata + sub_dir = save_dir / f"opus-mt-{hf_model_name}" + sub_dir.mkdir(exist_ok=True) + dest = sub_dir / "README.md" + dest.open("w").write(content) + pd.Series(metadata).to_json(sub_dir / "metadata.json") + + # if dry_run: + return content, metadata + + +def make_registry(repo_path="Opus-MT-train/models"): + if not (Path(repo_path) / "fr-en" / "README.md").exists(): + raise ValueError( + f"repo_path:{repo_path} does not exist: " + "You must run: git clone git@github.com:Helsinki-NLP/Opus-MT-train.git before calling." + ) + results = {} + for p in Path(repo_path).iterdir(): + n_dash = p.name.count("-") + if n_dash == 0: + continue + else: + lns = list(open(p / "README.md").readlines()) + results[p.name] = _parse_readme(lns) + return [(k, v["pre-processing"], v["download"], v["download"][:-4] + ".test.txt") for k, v in results.items()] + + +def convert_all_sentencepiece_models(model_list=None, repo_path=None, dest_dir=Path("marian_converted")): + """Requires 300GB""" + save_dir = Path("marian_ckpt") + dest_dir = Path(dest_dir) + dest_dir.mkdir(exist_ok=True) + save_paths = [] + if model_list is None: + model_list: list = make_registry(repo_path=repo_path) + for k, prepro, download, test_set_url in tqdm(model_list): + if "SentencePiece" not in prepro: # dont convert BPE models. + continue + if not os.path.exists(save_dir / k): + download_and_unzip(download, save_dir / k) + pair_name = convert_opus_name_to_hf_name(k) + convert(save_dir / k, dest_dir / f"opus-mt-{pair_name}") + + save_paths.append(dest_dir / f"opus-mt-{pair_name}") + return save_paths + + +def lmap(f, x) -> List: + return list(map(f, x)) + + +def fetch_test_set(test_set_url): + import wget + + fname = wget.download(test_set_url, "opus_test.txt") + lns = Path(fname).open().readlines() + src = lmap(str.strip, lns[::4]) + gold = lmap(str.strip, lns[1::4]) + mar_model = lmap(str.strip, lns[2::4]) + if not (len(gold) == len(mar_model) == len(src)): + raise ValueError(f"Gold, marian and source lengths {len(gold)}, {len(mar_model)}, {len(src)} mismatched") + os.remove(fname) + return src, mar_model, gold + + +def convert_whole_dir(path=Path("marian_ckpt/")): + for subdir in tqdm(list(path.ls())): + dest_dir = f"marian_converted/{subdir.name}" + if (dest_dir / "pytorch_model.bin").exists(): + continue + convert(source_dir, dest_dir) + + +def _parse_readme(lns): + """Get link and metadata from opus model card equivalent.""" + subres = {} + for ln in [x.strip() for x in lns]: + if not ln.startswith("*"): + continue + ln = ln[1:].strip() + + for k in ["download", "dataset", "models", "model", "pre-processing"]: + if ln.startswith(k): + break + else: + continue + if k in ["dataset", "model", "pre-processing"]: + splat = ln.split(":") + _, v = splat + subres[k] = v + elif k == "download": + v = ln.split("(")[-1][:-1] + subres[k] = v + return subres + + +def save_tokenizer_config(dest_dir: Path, separate_vocabs=False): + dname = dest_dir.name.split("-") + dct = {"target_lang": dname[-1], "source_lang": "-".join(dname[:-1]), "separate_vocabs": separate_vocabs} + save_json(dct, dest_dir / "tokenizer_config.json") + + +def add_to_vocab_(vocab: 
Dict[str, int], special_tokens: List[str]): + start = max(vocab.values()) + 1 + added = 0 + for tok in special_tokens: + if tok in vocab: + continue + vocab[tok] = start + added + added += 1 + return added + + +def find_vocab_file(model_dir): + return list(model_dir.glob("*vocab.yml"))[0] + + +def find_src_vocab_file(model_dir): + return list(model_dir.glob("*src.vocab.yml"))[0] + + +def find_tgt_vocab_file(model_dir): + return list(model_dir.glob("*trg.vocab.yml"))[0] + + +def add_special_tokens_to_vocab(model_dir: Path, separate_vocab=False) -> None: + if separate_vocab: + vocab = load_yaml(find_src_vocab_file(model_dir)) + vocab = {k: int(v) for k, v in vocab.items()} + num_added = add_to_vocab_(vocab, [""]) + save_json(vocab, model_dir / "vocab.json") + + vocab = load_yaml(find_tgt_vocab_file(model_dir)) + vocab = {k: int(v) for k, v in vocab.items()} + num_added = add_to_vocab_(vocab, [""]) + save_json(vocab, model_dir / "target_vocab.json") + save_tokenizer_config(model_dir, separate_vocabs=separate_vocab) + else: + vocab = load_yaml(find_vocab_file(model_dir)) + vocab = {k: int(v) for k, v in vocab.items()} + num_added = add_to_vocab_(vocab, [""]) + print(f"added {num_added} tokens to vocab") + save_json(vocab, model_dir / "vocab.json") + save_tokenizer_config(model_dir) + + +def check_equal(marian_cfg, k1, k2): + v1, v2 = marian_cfg[k1], marian_cfg[k2] + if v1 != v2: + raise ValueError(f"hparams {k1},{k2} differ: {v1} != {v2}") + + +def check_marian_cfg_assumptions(marian_cfg): + assumed_settings = { + "layer-normalization": False, + "right-left": False, + "transformer-ffn-depth": 2, + "transformer-aan-depth": 2, + "transformer-no-projection": False, + "transformer-postprocess-emb": "d", + "transformer-postprocess": "dan", # Dropout, add, normalize + "transformer-preprocess": "", + "type": "transformer", + "ulr-dim-emb": 0, + "dec-cell-base-depth": 2, + "dec-cell-high-depth": 1, + "transformer-aan-nogate": False, + } + for k, v in assumed_settings.items(): + actual = marian_cfg[k] + if actual != v: + raise ValueError(f"Unexpected config value for {k} expected {v} got {actual}") + + +BIAS_KEY = "decoder_ff_logit_out_b" +BART_CONVERTER = { # for each encoder and decoder layer + "self_Wq": "self_attn.q_proj.weight", + "self_Wk": "self_attn.k_proj.weight", + "self_Wv": "self_attn.v_proj.weight", + "self_Wo": "self_attn.out_proj.weight", + "self_bq": "self_attn.q_proj.bias", + "self_bk": "self_attn.k_proj.bias", + "self_bv": "self_attn.v_proj.bias", + "self_bo": "self_attn.out_proj.bias", + "self_Wo_ln_scale": "self_attn_layer_norm.weight", + "self_Wo_ln_bias": "self_attn_layer_norm.bias", + "ffn_W1": "fc1.weight", + "ffn_b1": "fc1.bias", + "ffn_W2": "fc2.weight", + "ffn_b2": "fc2.bias", + "ffn_ffn_ln_scale": "final_layer_norm.weight", + "ffn_ffn_ln_bias": "final_layer_norm.bias", + # Decoder Cross Attention + "context_Wk": "encoder_attn.k_proj.weight", + "context_Wo": "encoder_attn.out_proj.weight", + "context_Wq": "encoder_attn.q_proj.weight", + "context_Wv": "encoder_attn.v_proj.weight", + "context_bk": "encoder_attn.k_proj.bias", + "context_bo": "encoder_attn.out_proj.bias", + "context_bq": "encoder_attn.q_proj.bias", + "context_bv": "encoder_attn.v_proj.bias", + "context_Wo_ln_scale": "encoder_attn_layer_norm.weight", + "context_Wo_ln_bias": "encoder_attn_layer_norm.bias", +} + + +class OpusState: + def __init__(self, source_dir, eos_token_id=0): + npz_path = find_model_file(source_dir) + self.state_dict = np.load(npz_path) + cfg = load_config_from_state_dict(self.state_dict) + if 
cfg["dim-vocabs"][0] != cfg["dim-vocabs"][1]: + raise ValueError + if "Wpos" in self.state_dict: + raise ValueError("Wpos key in state dictionary") + self.state_dict = dict(self.state_dict) + if cfg["tied-embeddings-all"]: + cfg["tied-embeddings-src"] = True + cfg["tied-embeddings"] = True + self.share_encoder_decoder_embeddings = cfg["tied-embeddings-src"] + + # create the tokenizer here because we need to know the eos_token_id + self.source_dir = source_dir + self.tokenizer = self.load_tokenizer() + # retrieve EOS token and set correctly + tokenizer_has_eos_token_id = ( + hasattr(self.tokenizer, "eos_token_id") and self.tokenizer.eos_token_id is not None + ) + eos_token_id = self.tokenizer.eos_token_id if tokenizer_has_eos_token_id else 0 + + if cfg["tied-embeddings-src"]: + self.wemb, self.final_bias = add_emb_entries(self.state_dict["Wemb"], self.state_dict[BIAS_KEY], 1) + self.pad_token_id = self.wemb.shape[0] - 1 + cfg["vocab_size"] = self.pad_token_id + 1 + else: + self.wemb, _ = add_emb_entries(self.state_dict["encoder_Wemb"], self.state_dict[BIAS_KEY], 1) + self.dec_wemb, self.final_bias = add_emb_entries( + self.state_dict["decoder_Wemb"], self.state_dict[BIAS_KEY], 1 + ) + # still assuming that vocab size is same for encoder and decoder + self.pad_token_id = self.wemb.shape[0] - 1 + cfg["vocab_size"] = self.pad_token_id + 1 + cfg["decoder_vocab_size"] = self.pad_token_id + 1 + + if cfg["vocab_size"] != self.tokenizer.vocab_size: + raise ValueError( + f"Original vocab size {cfg['vocab_size']} and new vocab size {len(self.tokenizer.encoder)} mismatched." + ) + + # self.state_dict['Wemb'].sha + self.state_keys = list(self.state_dict.keys()) + if "Wtype" in self.state_dict: + raise ValueError("Wtype key in state dictionary") + self._check_layer_entries() + self.cfg = cfg + hidden_size, intermediate_shape = self.state_dict["encoder_l1_ffn_W1"].shape + if hidden_size != cfg["dim-emb"]: + raise ValueError(f"Hidden size {hidden_size} and configured size {cfg['dim_emb']} mismatched") + + # Process decoder.yml + decoder_yml = cast_marian_config(load_yaml(source_dir / "decoder.yml")) + check_marian_cfg_assumptions(cfg) + self.hf_config = MarianConfig( + vocab_size=cfg["vocab_size"], + decoder_vocab_size=cfg.get("decoder_vocab_size", cfg["vocab_size"]), + share_encoder_decoder_embeddings=cfg["tied-embeddings-src"], + decoder_layers=cfg["dec-depth"], + encoder_layers=cfg["enc-depth"], + decoder_attention_heads=cfg["transformer-heads"], + encoder_attention_heads=cfg["transformer-heads"], + decoder_ffn_dim=cfg["transformer-dim-ffn"], + encoder_ffn_dim=cfg["transformer-dim-ffn"], + d_model=cfg["dim-emb"], + activation_function=cfg["transformer-ffn-activation"], + pad_token_id=self.pad_token_id, + eos_token_id=eos_token_id, + forced_eos_token_id=eos_token_id, + bos_token_id=0, + max_position_embeddings=cfg["dim-emb"], + scale_embedding=True, + normalize_embedding="n" in cfg["transformer-preprocess"], + static_position_embeddings=not cfg["transformer-train-position-embeddings"], + tie_word_embeddings=cfg["tied-embeddings"], + dropout=0.1, # see opus-mt-train repo/transformer-dropout param. 
+ # default: add_final_layer_norm=False, + num_beams=decoder_yml["beam-size"], + decoder_start_token_id=self.pad_token_id, + bad_words_ids=[[self.pad_token_id]], + max_length=512, + ) + + def _check_layer_entries(self): + self.encoder_l1 = self.sub_keys("encoder_l1") + self.decoder_l1 = self.sub_keys("decoder_l1") + self.decoder_l2 = self.sub_keys("decoder_l2") + if len(self.encoder_l1) != 16: + warnings.warn(f"Expected 16 keys for each encoder layer, got {len(self.encoder_l1)}") + if len(self.decoder_l1) != 26: + warnings.warn(f"Expected 26 keys for each decoder layer, got {len(self.decoder_l1)}") + if len(self.decoder_l2) != 26: + warnings.warn(f"Expected 26 keys for each decoder layer, got {len(self.decoder_l2)}") + + @property + def extra_keys(self): + extra = [] + for k in self.state_keys: + if ( + k.startswith("encoder_l") + or k.startswith("decoder_l") + or k in [CONFIG_KEY, "Wemb", "encoder_Wemb", "decoder_Wemb", "Wpos", "decoder_ff_logit_out_b"] + ): + continue + else: + extra.append(k) + return extra + + def sub_keys(self, layer_prefix): + return [remove_prefix(k, layer_prefix) for k in self.state_dict if k.startswith(layer_prefix)] + + def load_tokenizer(self): + # save tokenizer + add_special_tokens_to_vocab(self.source_dir, not self.share_encoder_decoder_embeddings) + return MarianTokenizer.from_pretrained(str(self.source_dir)) + + def load_marian_model(self) -> MarianMTModel: + state_dict, cfg = self.state_dict, self.hf_config + + if not cfg.static_position_embeddings: + raise ValueError("config.static_position_embeddings should be True") + model = MarianMTModel(cfg) + + if "hidden_size" in cfg.to_dict(): + raise ValueError("hidden_size is in config") + load_layers_( + model.model.encoder.layers, + state_dict, + BART_CONVERTER, + ) + load_layers_(model.model.decoder.layers, state_dict, BART_CONVERTER, is_decoder=True) + + # handle tensors not associated with layers + if self.cfg["tied-embeddings-src"]: + wemb_tensor = nn.Parameter(torch.FloatTensor(self.wemb)) + bias_tensor = nn.Parameter(torch.FloatTensor(self.final_bias)) + model.model.shared.weight = wemb_tensor + model.model.encoder.embed_tokens = model.model.decoder.embed_tokens = model.model.shared + else: + wemb_tensor = nn.Parameter(torch.FloatTensor(self.wemb)) + model.model.encoder.embed_tokens.weight = wemb_tensor + + decoder_wemb_tensor = nn.Parameter(torch.FloatTensor(self.dec_wemb)) + bias_tensor = nn.Parameter(torch.FloatTensor(self.final_bias)) + model.model.decoder.embed_tokens.weight = decoder_wemb_tensor + + model.final_logits_bias = bias_tensor + + if "Wpos" in state_dict: + print("Unexpected: got Wpos") + wpos_tensor = torch.tensor(state_dict["Wpos"]) + model.model.encoder.embed_positions.weight = wpos_tensor + model.model.decoder.embed_positions.weight = wpos_tensor + + if cfg.normalize_embedding: + if "encoder_emb_ln_scale_pre" not in state_dict: + raise ValueError("encoder_emb_ln_scale_pre is not in state dictionary") + raise NotImplementedError("Need to convert layernorm_embedding") + + if self.extra_keys: + raise ValueError(f"Failed to convert {self.extra_keys}") + + if model.get_input_embeddings().padding_idx != self.pad_token_id: + raise ValueError( + f"Padding tokens {model.get_input_embeddings().padding_idx} and {self.pad_token_id} mismatched" + ) + return model + + +def download_and_unzip(url, dest_dir): + try: + import wget + except ImportError: + raise ImportError("you must pip install wget") + + filename = wget.download(url) + unzip(filename, dest_dir) + os.remove(filename) + + +def
convert(source_dir: Path, dest_dir): + dest_dir = Path(dest_dir) + dest_dir.mkdir(exist_ok=True) + + opus_state = OpusState(source_dir) + + # save tokenizer + opus_state.tokenizer.save_pretrained(dest_dir) + + # save_json(opus_state.cfg, dest_dir / "marian_original_config.json") + # ^^ Uncomment to save human readable marian config for debugging + + model = opus_state.load_marian_model() + model = model.half() + model.save_pretrained(dest_dir) + model.from_pretrained(dest_dir) # sanity check + + +def load_yaml(path): + import yaml + + with open(path, encoding="utf-8") as f: + return yaml.load(f, Loader=yaml.BaseLoader) + + +def save_json(content: Union[Dict, List], path: str) -> None: + with open(path, "w") as f: + json.dump(content, f) + + +def unzip(zip_path: str, dest_dir: str) -> None: + with ZipFile(zip_path, "r") as zipObj: + zipObj.extractall(dest_dir) + + +if __name__ == "__main__": + """ + Tatoeba conversion instructions in scripts/tatoeba/README.md + """ + parser = argparse.ArgumentParser() + # Required parameters + parser.add_argument("--src", type=str, help="path to marian model sub dir", default="en-de") + parser.add_argument("--dest", type=str, default=None, help="Path to the output PyTorch model.") + args = parser.parse_args() + + source_dir = Path(args.src) + if not source_dir.exists(): + raise ValueError(f"Source directory {source_dir} not found") + dest_dir = f"converted-{source_dir.name}" if args.dest is None else args.dest + convert(source_dir, dest_dir) diff --git a/venv/lib/python3.10/site-packages/transformers/models/marian/modeling_flax_marian.py b/venv/lib/python3.10/site-packages/transformers/models/marian/modeling_flax_marian.py new file mode 100644 index 0000000000000000000000000000000000000000..2002d60caaa3d25fc9271d253ae9753271a9a5fe --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/marian/modeling_flax_marian.py @@ -0,0 +1,1497 @@ +# coding=utf-8 +# Copyright 2021 The Marian Team Authors and The Google Flax Team Authors And The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+""" Flax Marian model.""" + +import math +import random +from functools import partial +from typing import Callable, Optional, Tuple + +import flax.linen as nn +import jax +import jax.numpy as jnp +import numpy as np +from flax.core.frozen_dict import FrozenDict, freeze, unfreeze +from flax.linen import combine_masks, make_causal_mask +from flax.linen.attention import dot_product_attention_weights +from flax.traverse_util import flatten_dict, unflatten_dict +from jax import lax +from jax.random import PRNGKey + +from ...modeling_flax_outputs import ( + FlaxBaseModelOutput, + FlaxBaseModelOutputWithPastAndCrossAttentions, + FlaxCausalLMOutputWithCrossAttentions, + FlaxSeq2SeqLMOutput, + FlaxSeq2SeqModelOutput, +) +from ...modeling_flax_utils import ( + ACT2FN, + FlaxPreTrainedModel, + append_call_sample_docstring, + append_replace_return_docstrings, + overwrite_call_docstring, +) +from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings +from .configuration_marian import MarianConfig + + +logger = logging.get_logger(__name__) + +_CHECKPOINT_FOR_DOC = "Helsinki-NLP/opus-mt-en-de" +_CONFIG_FOR_DOC = "MarianConfig" + + +MARIAN_START_DOCSTRING = r""" + This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the + library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads + etc.) + + This model is also a Flax Linen + [flax.nn.Module](https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html) subclass. Use it as a + regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior. + + Finally, this model supports inherent JAX features such as: + + - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit) + - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation) + - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap) + - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap) + + Parameters: + config ([`MarianConfig`]): Model configuration class with all the parameters of the model. + Initializing with a config file does not load the weights associated with the model, only the + configuration. Check out the [`~FlaxPreTrainedModel.from_pretrained`] method to load the model weights. + dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`): + The data type of the computation. Can be one of `jax.numpy.float32`, `jax.numpy.float16` (on GPUs) and + `jax.numpy.bfloat16` (on TPUs). + + This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If + specified all the computation will be performed with the given `dtype`. + + **Note that this only specifies the dtype of the computation and does not influence the dtype of model + parameters.** + + If you wish to change the dtype of the model parameters, see [`~FlaxPreTrainedModel.to_fp16`] and + [`~FlaxPreTrainedModel.to_bf16`]. +""" + +MARIAN_INPUTS_DOCSTRING = r""" + Args: + input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`): + Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide + it. + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. 
+ + [What are input IDs?](../glossary#input-ids) + attention_mask (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + decoder_input_ids (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*): + Indices of decoder input sequence tokens in the vocabulary. + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are decoder input IDs?](../glossary#decoder-input-ids) + + For translation and summarization training, `decoder_input_ids` should be provided. If no + `decoder_input_ids` is provided, the model will create this tensor by shifting the `input_ids` to the right + for denoising pre-training following the paper. + decoder_attention_mask (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*): + Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also + be used by default. + + If you want to change padding behavior, you should modify to your needs. See diagram 1 in [the + paper](https://arxiv.org/abs/1910.13461) for more information on the default strategy. + position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*): + Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, + config.max_position_embeddings - 1]`. + decoder_position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*): + Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the + range `[0, config.max_position_embeddings - 1]`. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. +""" + + +MARIAN_ENCODE_INPUTS_DOCSTRING = r""" + Args: + input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`): + Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide + it. + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input-ids) + attention_mask (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*): + Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, + config.max_position_embeddings - 1]`. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. 
+ output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. +""" + +MARIAN_DECODE_INPUTS_DOCSTRING = r""" + Args: + decoder_input_ids (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`): + Indices of decoder input sequence tokens in the vocabulary. + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are decoder input IDs?](../glossary#decoder-input-ids) + + For translation and summarization training, `decoder_input_ids` should be provided. If no + `decoder_input_ids` is provided, the model will create this tensor by shifting the `input_ids` to the right + for denoising pre-training following the paper. + encoder_outputs (`tuple(tuple(jnp.ndarray)`): + Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`) + `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) is a sequence of + hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. + encoder_attention_mask (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + decoder_attention_mask (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*): + Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also + be used by default. + + If you want to change padding behavior, you should modify to your needs. See diagram 1 in [the + paper](https://arxiv.org/abs/1910.13461) for more information on the default strategy. + decoder_position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*): + Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the + range `[0, config.max_position_embeddings - 1]`. + past_key_values (`Dict[str, np.ndarray]`, *optional*, returned by `init_cache` or when passing previous `past_key_values`): + Dictionary of pre-computed hidden-states (key and values in the attention blocks) that can be used for fast + auto-regressive decoding. Pre-computed key and value hidden-states are of shape *[batch_size, max_length]*. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
+""" + + +def create_sinusoidal_positions(n_pos, dim): + position_enc = np.array([[pos / np.power(10000, 2 * (j // 2) / dim) for j in range(dim)] for pos in range(n_pos)]) + sentinel = dim // 2 + dim % 2 + out = np.zeros_like(position_enc) + out[:, 0:sentinel] = np.sin(position_enc[:, 0::2]) + out[:, sentinel:] = np.cos(position_enc[:, 1::2]) + + return jnp.array(out) + + +# Copied from transformers.models.bart.modeling_flax_bart.shift_tokens_right +def shift_tokens_right(input_ids: jnp.ndarray, pad_token_id: int, decoder_start_token_id: int) -> jnp.ndarray: + """ + Shift input ids one token to the right. + """ + shifted_input_ids = jnp.zeros_like(input_ids) + shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1]) + shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id) + + shifted_input_ids = jnp.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids) + return shifted_input_ids + + +# Copied from transformers.models.bart.modeling_flax_bart.FlaxBartAttention with Bart->Marian +class FlaxMarianAttention(nn.Module): + config: MarianConfig + embed_dim: int + num_heads: int + dropout: float = 0.0 + causal: bool = False + bias: bool = True + dtype: jnp.dtype = jnp.float32 # the dtype of the computation + + def setup(self) -> None: + self.head_dim = self.embed_dim // self.num_heads + if self.head_dim * self.num_heads != self.embed_dim: + raise ValueError( + f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}" + f" and `num_heads`: {self.num_heads})." + ) + + dense = partial( + nn.Dense, + self.embed_dim, + use_bias=self.bias, + dtype=self.dtype, + kernel_init=jax.nn.initializers.normal(self.config.init_std), + ) + + self.q_proj, self.k_proj, self.v_proj = dense(), dense(), dense() + self.out_proj = dense() + + self.dropout_layer = nn.Dropout(rate=self.dropout) + + if self.causal: + self.causal_mask = make_causal_mask( + jnp.ones((1, self.config.max_position_embeddings), dtype="bool"), dtype="bool" + ) + + def _split_heads(self, hidden_states): + return hidden_states.reshape(hidden_states.shape[:2] + (self.num_heads, self.head_dim)) + + def _merge_heads(self, hidden_states): + return hidden_states.reshape(hidden_states.shape[:2] + (self.embed_dim,)) + + @nn.compact + def _concatenate_to_cache(self, key, value, query, attention_mask): + """ + This function takes projected key, value states from a single input token and concatenates the states to cached + states from previous steps. This function is slighly adapted from the official Flax repository: + https://github.com/google/flax/blob/491ce18759622506588784b4fca0e4bf05f8c8cd/flax/linen/attention.py#L252 + """ + # detect if we're initializing by absence of existing cache data. 
+ is_initialized = self.has_variable("cache", "cached_key") + cached_key = self.variable("cache", "cached_key", jnp.zeros, key.shape, key.dtype) + cached_value = self.variable("cache", "cached_value", jnp.zeros, value.shape, value.dtype) + cache_index = self.variable("cache", "cache_index", lambda: jnp.array(0, dtype=jnp.int32)) + + if is_initialized: + *batch_dims, max_length, num_heads, depth_per_head = cached_key.value.shape + # update key, value caches with our new 1d spatial slices + cur_index = cache_index.value + indices = (0,) * len(batch_dims) + (cur_index, 0, 0) + key = lax.dynamic_update_slice(cached_key.value, key, indices) + value = lax.dynamic_update_slice(cached_value.value, value, indices) + cached_key.value = key + cached_value.value = value + num_updated_cache_vectors = query.shape[1] + cache_index.value = cache_index.value + num_updated_cache_vectors + # causal mask for cached decoder self-attention: our single query position should only attend to those key positions that have already been generated and cached, not the remaining zero elements. + pad_mask = jnp.broadcast_to( + jnp.arange(max_length) < cur_index + num_updated_cache_vectors, + tuple(batch_dims) + (1, num_updated_cache_vectors, max_length), + ) + attention_mask = combine_masks(pad_mask, attention_mask) + return key, value, attention_mask + + def __call__( + self, + hidden_states: jnp.ndarray, + key_value_states: Optional[jnp.ndarray] = None, + attention_mask: Optional[jnp.ndarray] = None, + init_cache: bool = False, + deterministic: bool = True, + ) -> Tuple[jnp.ndarray]: + """Input shape: Batch x Time x Channel""" + + # if key_value_states are provided this layer is used as a cross-attention layer + # for the decoder + is_cross_attention = key_value_states is not None + batch_size = hidden_states.shape[0] + + # get query proj + query_states = self.q_proj(hidden_states) + # get key, value proj + if is_cross_attention: + # cross_attentions + key_states = self.k_proj(key_value_states) + value_states = self.v_proj(key_value_states) + else: + # self_attention + key_states = self.k_proj(hidden_states) + value_states = self.v_proj(hidden_states) + + query_states = self._split_heads(query_states) + key_states = self._split_heads(key_states) + value_states = self._split_heads(value_states) + + # handle cache prepare causal attention mask + if self.causal: + query_length, key_length = query_states.shape[1], key_states.shape[1] + if self.has_variable("cache", "cached_key"): + mask_shift = self.variables["cache"]["cache_index"] + max_decoder_length = self.variables["cache"]["cached_key"].shape[1] + causal_mask = lax.dynamic_slice( + self.causal_mask, (0, 0, mask_shift, 0), (1, 1, query_length, max_decoder_length) + ) + else: + causal_mask = self.causal_mask[:, :, :query_length, :key_length] + causal_mask = jnp.broadcast_to(causal_mask, (batch_size,) + causal_mask.shape[1:]) + + # combine masks if needed + if attention_mask is not None and self.causal: + attention_mask = jnp.broadcast_to(jnp.expand_dims(attention_mask, axis=(-3, -2)), causal_mask.shape) + attention_mask = combine_masks(attention_mask, causal_mask) + elif self.causal: + attention_mask = causal_mask + elif attention_mask is not None: + attention_mask = jnp.expand_dims(attention_mask, axis=(-3, -2)) + + # During fast autoregressive decoding, we feed one position at a time, + # and cache the keys and values step by step. 
+ if self.causal and (self.has_variable("cache", "cached_key") or init_cache): + key_states, value_states, attention_mask = self._concatenate_to_cache( + key_states, value_states, query_states, attention_mask + ) + + # Convert the boolean attention mask to an attention bias. + if attention_mask is not None: + # attention mask in the form of attention bias + attention_bias = lax.select( + attention_mask > 0, + jnp.full(attention_mask.shape, 0.0).astype(self.dtype), + jnp.full(attention_mask.shape, jnp.finfo(self.dtype).min).astype(self.dtype), + ) + else: + attention_bias = None + + dropout_rng = None + if not deterministic and self.dropout > 0.0: + dropout_rng = self.make_rng("dropout") + + attn_weights = dot_product_attention_weights( + query_states, + key_states, + bias=attention_bias, + dropout_rng=dropout_rng, + dropout_rate=self.dropout, + broadcast_dropout=True, + deterministic=deterministic, + dtype=self.dtype, + precision=None, + ) + + attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value_states) + attn_output = self._merge_heads(attn_output) + attn_output = self.out_proj(attn_output) + + return attn_output, attn_weights + + +# Copied from transformers.models.bart.modeling_flax_bart.FlaxBartEncoderLayer with Bart->Marian +class FlaxMarianEncoderLayer(nn.Module): + config: MarianConfig + dtype: jnp.dtype = jnp.float32 + + def setup(self) -> None: + self.embed_dim = self.config.d_model + self.self_attn = FlaxMarianAttention( + config=self.config, + embed_dim=self.embed_dim, + num_heads=self.config.encoder_attention_heads, + dropout=self.config.attention_dropout, + dtype=self.dtype, + ) + self.self_attn_layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05) + self.dropout_layer = nn.Dropout(rate=self.config.dropout) + self.activation_fn = ACT2FN[self.config.activation_function] + self.activation_dropout_layer = nn.Dropout(rate=self.config.activation_dropout) + self.fc1 = nn.Dense( + self.config.encoder_ffn_dim, + dtype=self.dtype, + kernel_init=jax.nn.initializers.normal(self.config.init_std), + ) + self.fc2 = nn.Dense( + self.embed_dim, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.init_std) + ) + self.final_layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05) + + def __call__( + self, + hidden_states: jnp.ndarray, + attention_mask: jnp.ndarray, + output_attentions: bool = True, + deterministic: bool = True, + ) -> Tuple[jnp.ndarray]: + residual = hidden_states + hidden_states, attn_weights = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask) + + hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic) + hidden_states = residual + hidden_states + hidden_states = self.self_attn_layer_norm(hidden_states) + + residual = hidden_states + hidden_states = self.activation_fn(self.fc1(hidden_states)) + hidden_states = self.activation_dropout_layer(hidden_states, deterministic=deterministic) + hidden_states = self.fc2(hidden_states) + hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic) + hidden_states = residual + hidden_states + hidden_states = self.final_layer_norm(hidden_states) + + outputs = (hidden_states,) + + if output_attentions: + outputs += (attn_weights,) + + return outputs + + +# Copied from transformers.models.bart.modeling_flax_bart.FlaxBartEncoderLayerCollection with Bart->Marian +class FlaxMarianEncoderLayerCollection(nn.Module): + config: MarianConfig + dtype: jnp.dtype = jnp.float32 # the dtype of the computation + + def setup(self): + self.layers = [ + 
FlaxMarianEncoderLayer(self.config, name=str(i), dtype=self.dtype) + for i in range(self.config.encoder_layers) + ] + self.layerdrop = self.config.encoder_layerdrop + + def __call__( + self, + hidden_states, + attention_mask, + deterministic: bool = True, + output_attentions: bool = False, + output_hidden_states: bool = False, + return_dict: bool = True, + ): + all_attentions = () if output_attentions else None + all_hidden_states = () if output_hidden_states else None + + for encoder_layer in self.layers: + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) + dropout_probability = random.uniform(0, 1) + if not deterministic and (dropout_probability < self.layerdrop): # skip the layer + layer_outputs = (None, None) + else: + layer_outputs = encoder_layer( + hidden_states, + attention_mask, + output_attentions, + deterministic, + ) + hidden_states = layer_outputs[0] + if output_attentions: + all_attentions = all_attentions + (layer_outputs[1],) + + if output_hidden_states: + all_hidden_states += (hidden_states,) + + outputs = (hidden_states, all_hidden_states, all_attentions) + + if not return_dict: + return tuple(v for v in outputs if v is not None) + + return FlaxBaseModelOutput( + last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions + ) + + +# Copied from transformers.models.bart.modeling_flax_bart.FlaxBartDecoderLayer with Bart->Marian +class FlaxMarianDecoderLayer(nn.Module): + config: MarianConfig + dtype: jnp.dtype = jnp.float32 + + def setup(self) -> None: + self.embed_dim = self.config.d_model + self.self_attn = FlaxMarianAttention( + config=self.config, + embed_dim=self.embed_dim, + num_heads=self.config.decoder_attention_heads, + dropout=self.config.attention_dropout, + causal=True, + dtype=self.dtype, + ) + self.dropout_layer = nn.Dropout(rate=self.config.dropout) + self.activation_fn = ACT2FN[self.config.activation_function] + self.activation_dropout_layer = nn.Dropout(rate=self.config.activation_dropout) + + self.self_attn_layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05) + self.encoder_attn = FlaxMarianAttention( + config=self.config, + embed_dim=self.embed_dim, + num_heads=self.config.decoder_attention_heads, + dropout=self.config.attention_dropout, + dtype=self.dtype, + ) + self.encoder_attn_layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05) + self.fc1 = nn.Dense( + self.config.decoder_ffn_dim, + dtype=self.dtype, + kernel_init=jax.nn.initializers.normal(self.config.init_std), + ) + self.fc2 = nn.Dense( + self.embed_dim, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.init_std) + ) + self.final_layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05) + + def __call__( + self, + hidden_states: jnp.ndarray, + attention_mask: jnp.ndarray, + encoder_hidden_states: Optional[jnp.ndarray] = None, + encoder_attention_mask: Optional[jnp.ndarray] = None, + init_cache: bool = False, + output_attentions: bool = True, + deterministic: bool = True, + ) -> Tuple[jnp.ndarray]: + residual = hidden_states + + # Self Attention + hidden_states, self_attn_weights = self.self_attn( + hidden_states=hidden_states, attention_mask=attention_mask, init_cache=init_cache + ) + hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic) + hidden_states = residual + hidden_states + hidden_states = self.self_attn_layer_norm(hidden_states) + + # Cross-Attention Block + cross_attn_weights = None + if 
encoder_hidden_states is not None: + residual = hidden_states + + hidden_states, cross_attn_weights = self.encoder_attn( + hidden_states=hidden_states, + key_value_states=encoder_hidden_states, + attention_mask=encoder_attention_mask, + ) + hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic) + hidden_states = residual + hidden_states + hidden_states = self.encoder_attn_layer_norm(hidden_states) + + # Fully Connected + residual = hidden_states + hidden_states = self.activation_fn(self.fc1(hidden_states)) + hidden_states = self.activation_dropout_layer(hidden_states, deterministic=deterministic) + hidden_states = self.fc2(hidden_states) + hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic) + hidden_states = residual + hidden_states + hidden_states = self.final_layer_norm(hidden_states) + + outputs = (hidden_states,) + + if output_attentions: + outputs += (self_attn_weights, cross_attn_weights) + + return outputs + + +# Copied from transformers.models.bart.modeling_flax_bart.FlaxBartDecoderLayerCollection with Bart->Marian +class FlaxMarianDecoderLayerCollection(nn.Module): + config: MarianConfig + dtype: jnp.dtype = jnp.float32 # the dtype of the computation + + def setup(self): + self.layers = [ + FlaxMarianDecoderLayer(self.config, name=str(i), dtype=self.dtype) + for i in range(self.config.decoder_layers) + ] + self.layerdrop = self.config.decoder_layerdrop + + def __call__( + self, + hidden_states, + attention_mask, + encoder_hidden_states: Optional[jnp.ndarray] = None, + encoder_attention_mask: Optional[jnp.ndarray] = None, + deterministic: bool = True, + init_cache: bool = False, + output_attentions: bool = False, + output_hidden_states: bool = False, + return_dict: bool = True, + ): + # decoder layers + all_hidden_states = () if output_hidden_states else None + all_self_attns = () if output_attentions else None + all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None + + for decoder_layer in self.layers: + if output_hidden_states: + all_hidden_states += (hidden_states,) + # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) + dropout_probability = random.uniform(0, 1) + if not deterministic and (dropout_probability < self.layerdrop): + layer_outputs = (None, None, None) + else: + layer_outputs = decoder_layer( + hidden_states, + attention_mask=attention_mask, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_attention_mask, + init_cache=init_cache, + output_attentions=output_attentions, + deterministic=deterministic, + ) + + hidden_states = layer_outputs[0] + if output_attentions: + all_self_attns += (layer_outputs[1],) + + if encoder_hidden_states is not None: + all_cross_attentions += (layer_outputs[2],) + + # add hidden states from the last decoder layer + if output_hidden_states: + all_hidden_states += (hidden_states,) + + outputs = [hidden_states, all_hidden_states, all_self_attns, all_cross_attentions] + + if not return_dict: + return tuple(v for v in outputs if v is not None) + + return FlaxBaseModelOutputWithPastAndCrossAttentions( + last_hidden_state=hidden_states, + hidden_states=all_hidden_states, + attentions=all_self_attns, + cross_attentions=all_cross_attentions, + ) + + +class FlaxMarianEncoder(nn.Module): + config: MarianConfig + embed_tokens: nn.Embed + dtype: jnp.dtype = jnp.float32 # the dtype of the computation + + def setup(self): + self.dropout_layer = nn.Dropout(rate=self.config.dropout) + + embed_dim = 
self.config.d_model + self.max_source_positions = self.config.max_position_embeddings + self.embed_scale = math.sqrt(embed_dim) if self.config.scale_embedding else 1.0 + + self.embed_positions = create_sinusoidal_positions(self.config.max_position_embeddings, embed_dim) + self.layers = FlaxMarianEncoderLayerCollection(self.config, self.dtype) + + def __call__( + self, + input_ids, + attention_mask, + position_ids, + output_attentions: bool = False, + output_hidden_states: bool = False, + return_dict: bool = True, + deterministic: bool = True, + ): + input_shape = input_ids.shape + input_ids = input_ids.reshape(-1, input_shape[-1]) + + inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale + + positions = jnp.take(self.embed_positions, position_ids, axis=0) + # explicitly cast the positions here, since self.embed_positions are not registered as parameters + positions = positions.astype(inputs_embeds.dtype) + + hidden_states = inputs_embeds + positions + hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic) + + outputs = self.layers( + hidden_states, + attention_mask, + deterministic=deterministic, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + if not return_dict: + return outputs + + return FlaxBaseModelOutput( + last_hidden_state=outputs.last_hidden_state, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + +class FlaxMarianDecoder(nn.Module): + config: MarianConfig + embed_tokens: nn.Embed + dtype: jnp.dtype = jnp.float32 # the dtype of the computation + + def setup(self): + self.dropout_layer = nn.Dropout(rate=self.config.dropout) + + embed_dim = self.config.d_model + self.max_target_positions = self.config.max_position_embeddings + self.embed_scale = math.sqrt(self.config.d_model) if self.config.scale_embedding else 1.0 + + self.embed_positions = create_sinusoidal_positions(self.config.max_position_embeddings, embed_dim) + self.layers = FlaxMarianDecoderLayerCollection(self.config, self.dtype) + + def __call__( + self, + input_ids, + attention_mask, + position_ids, + encoder_hidden_states: Optional[jnp.ndarray] = None, + encoder_attention_mask: Optional[jnp.ndarray] = None, + init_cache: bool = False, + output_attentions: bool = False, + output_hidden_states: bool = False, + return_dict: bool = True, + deterministic: bool = True, + ): + input_shape = input_ids.shape + input_ids = input_ids.reshape(-1, input_shape[-1]) + + inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale + + # embed positions + positions = jnp.take(self.embed_positions, position_ids, axis=0) + # explicitly cast the positions here, since self.embed_positions are not registered as parameters + positions = positions.astype(inputs_embeds.dtype) + + hidden_states = inputs_embeds + positions + + hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic) + + outputs = self.layers( + hidden_states, + attention_mask, + encoder_hidden_states, + encoder_attention_mask, + deterministic=deterministic, + init_cache=init_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + if not return_dict: + return outputs + + return FlaxBaseModelOutputWithPastAndCrossAttentions( + last_hidden_state=outputs.last_hidden_state, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + cross_attentions=outputs.cross_attentions, + ) + + +class FlaxMarianModule(nn.Module): + config: MarianConfig + 
dtype: jnp.dtype = jnp.float32 # the dtype of the computation + + def setup(self): + self.shared = nn.Embed( + self.config.vocab_size, + self.config.d_model, + embedding_init=jax.nn.initializers.normal(self.config.init_std), + ) + + self.encoder = FlaxMarianEncoder(self.config, dtype=self.dtype, embed_tokens=self.shared) + self.decoder = FlaxMarianDecoder(self.config, dtype=self.dtype, embed_tokens=self.shared) + + def _get_encoder_module(self): + return self.encoder + + def _get_decoder_module(self): + return self.decoder + + def __call__( + self, + input_ids, + attention_mask, + decoder_input_ids, + decoder_attention_mask, + position_ids, + decoder_position_ids, + output_attentions: bool = False, + output_hidden_states: bool = False, + return_dict: bool = True, + deterministic: bool = True, + ): + encoder_outputs = self.encoder( + input_ids=input_ids, + attention_mask=attention_mask, + position_ids=position_ids, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + deterministic=deterministic, + ) + + decoder_outputs = self.decoder( + input_ids=decoder_input_ids, + attention_mask=decoder_attention_mask, + position_ids=decoder_position_ids, + encoder_hidden_states=encoder_outputs[0], + encoder_attention_mask=attention_mask, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + deterministic=deterministic, + ) + + if not return_dict: + return decoder_outputs + encoder_outputs + + return FlaxSeq2SeqModelOutput( + last_hidden_state=decoder_outputs.last_hidden_state, + decoder_hidden_states=decoder_outputs.hidden_states, + decoder_attentions=decoder_outputs.attentions, + cross_attentions=decoder_outputs.cross_attentions, + encoder_last_hidden_state=encoder_outputs.last_hidden_state, + encoder_hidden_states=encoder_outputs.hidden_states, + encoder_attentions=encoder_outputs.attentions, + ) + + +class FlaxMarianPreTrainedModel(FlaxPreTrainedModel): + config_class = MarianConfig + base_model_prefix: str = "model" + module_class: nn.Module = None + + def __init__( + self, + config: MarianConfig, + input_shape: Tuple[int] = (1, 1), + seed: int = 0, + dtype: jnp.dtype = jnp.float32, + _do_init: bool = True, + **kwargs, + ): + module = self.module_class(config=config, dtype=dtype, **kwargs) + super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init) + + def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict: + # init input tensors + input_ids = jnp.zeros(input_shape, dtype="i4") + # make sure initialization pass will work for FlaxMarianForSequenceClassificationModule + input_ids = input_ids.at[(..., -1)].set(self.config.eos_token_id) + attention_mask = jnp.ones_like(input_ids) + decoder_input_ids = input_ids + decoder_attention_mask = jnp.ones_like(input_ids) + + batch_size, sequence_length = input_ids.shape + position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)) + decoder_position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)) + + params_rng, dropout_rng = jax.random.split(rng) + rngs = {"params": params_rng, "dropout": dropout_rng} + + random_params = self.module.init( + rngs, + input_ids, + attention_mask, + decoder_input_ids, + decoder_attention_mask, + position_ids, + decoder_position_ids, + )["params"] + + if params is not None: + random_params = flatten_dict(unfreeze(random_params)) + params 
= flatten_dict(unfreeze(params)) + for missing_key in self._missing_keys: + params[missing_key] = random_params[missing_key] + self._missing_keys = set() + return freeze(unflatten_dict(params)) + else: + return random_params + + def init_cache(self, batch_size, max_length, encoder_outputs): + r""" + Args: + batch_size (`int`): + batch_size used for fast auto-regressive decoding. Defines the batch size of the initialized cache. + max_length (`int`): + maximum possible length for auto-regressive decoding. Defines the sequence length of the initialized + cache. + encoder_outputs (`Union[FlaxBaseModelOutput, tuple(tuple(jnp.ndarray)]`): + `encoder_outputs` consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: + `attentions`). `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) + is a sequence of hidden-states at the output of the last layer of the encoder. Used in the + cross-attention of the decoder. + """ + # init input variables to retrieve cache + decoder_input_ids = jnp.ones((batch_size, max_length), dtype="i4") + decoder_attention_mask = jnp.ones_like(decoder_input_ids) + decoder_position_ids = jnp.broadcast_to( + jnp.arange(jnp.atleast_2d(decoder_input_ids).shape[-1]), decoder_input_ids.shape + ) + + def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs): + decoder_module = module._get_decoder_module() + return decoder_module(decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs) + + init_variables = self.module.init( + jax.random.PRNGKey(0), + decoder_input_ids=decoder_input_ids, + decoder_attention_mask=decoder_attention_mask, + decoder_position_ids=decoder_position_ids, + encoder_hidden_states=encoder_outputs[0], + init_cache=True, + method=_decoder_forward, # we only need to call the decoder to init the cache + ) + return unfreeze(init_variables["cache"]) + + @add_start_docstrings(MARIAN_ENCODE_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=FlaxBaseModelOutput, config_class=MarianConfig) + def encode( + self, + input_ids: jnp.ndarray, + attention_mask: Optional[jnp.ndarray] = None, + position_ids: Optional[jnp.ndarray] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + train: bool = False, + params: dict = None, + dropout_rng: PRNGKey = None, + ): + r""" + Returns: + + Example: + + ```python + >>> from transformers import AutoTokenizer, FlaxMarianMTModel + + >>> tokenizer = AutoTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-de") + >>> model = FlaxMarianMTModel.from_pretrained("Helsinki-NLP/opus-mt-en-de") + + >>> text = "My friends are cool but they eat too many carbs." 
+ >>> inputs = tokenizer(text, max_length=64, return_tensors="jax") + >>> encoder_outputs = model.encode(**inputs) + ```""" + + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.return_dict + + if attention_mask is None: + attention_mask = jnp.ones_like(input_ids) + if position_ids is None: + batch_size, sequence_length = input_ids.shape + position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)) + + # Handle any PRNG if needed + rngs = {} + if dropout_rng is not None: + rngs["dropout"] = dropout_rng + + def _encoder_forward(module, input_ids, attention_mask, position_ids, **kwargs): + encode_module = module._get_encoder_module() + return encode_module(input_ids, attention_mask, position_ids, **kwargs) + + return self.module.apply( + {"params": params or self.params}, + input_ids=jnp.array(input_ids, dtype="i4"), + attention_mask=jnp.array(attention_mask, dtype="i4"), + position_ids=jnp.array(position_ids, dtype="i4"), + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + deterministic=not train, + rngs=rngs, + method=_encoder_forward, + ) + + @add_start_docstrings(MARIAN_DECODE_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=FlaxBaseModelOutputWithPastAndCrossAttentions, config_class=MarianConfig) + def decode( + self, + decoder_input_ids, + encoder_outputs, + encoder_attention_mask: Optional[jnp.ndarray] = None, + decoder_attention_mask: Optional[jnp.ndarray] = None, + decoder_position_ids: Optional[jnp.ndarray] = None, + past_key_values: dict = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + train: bool = False, + params: dict = None, + dropout_rng: PRNGKey = None, + ): + r""" + Returns: + + Example: + + ```python + >>> import jax.numpy as jnp + >>> from transformers import AutoTokenizer, FlaxMarianMTModel + + >>> tokenizer = AutoTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-de") + >>> model = FlaxMarianMTModel.from_pretrained("Helsinki-NLP/opus-mt-en-de") + + >>> text = "My friends are cool but they eat too many carbs." 
+ >>> inputs = tokenizer(text, max_length=64, return_tensors="jax") + >>> encoder_outputs = model.encode(**inputs) + + >>> decoder_start_token_id = model.config.decoder_start_token_id + >>> decoder_input_ids = jnp.ones((inputs.input_ids.shape[0], 1), dtype="i4") * decoder_start_token_id + + >>> outputs = model.decode(decoder_input_ids, encoder_outputs) + >>> last_decoder_hidden_states = outputs.last_hidden_state + ```""" + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.return_dict + + encoder_hidden_states = encoder_outputs[0] + if encoder_attention_mask is None: + batch_size, sequence_length = encoder_hidden_states.shape[:2] + encoder_attention_mask = jnp.ones((batch_size, sequence_length)) + + batch_size, sequence_length = decoder_input_ids.shape + if decoder_attention_mask is None: + decoder_attention_mask = jnp.ones((batch_size, sequence_length)) + + if decoder_position_ids is None: + if past_key_values is not None: + raise ValueError("Make sure to provide `decoder_position_ids` when passing `past_key_values`.") + + decoder_position_ids = jnp.broadcast_to( + jnp.arange(sequence_length)[None, :], (batch_size, sequence_length) + ) + + # Handle any PRNG if needed + rngs = {} + if dropout_rng is not None: + rngs["dropout"] = dropout_rng + + inputs = {"params": params or self.params} + + # if past_key_values are passed then cache is already initialized a private flag init_cache has to be + # passed down to ensure cache is used. It has to be made sure that cache is marked as mutable so that + # it can be changed by FlaxMarianAttention module + if past_key_values: + inputs["cache"] = past_key_values + mutable = ["cache"] + else: + mutable = False + + def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs): + decoder_module = module._get_decoder_module() + return decoder_module( + decoder_input_ids, + decoder_attention_mask, + decoder_position_ids, + **kwargs, + ) + + outputs = self.module.apply( + inputs, + decoder_input_ids=jnp.array(decoder_input_ids, dtype="i4"), + decoder_attention_mask=jnp.array(decoder_attention_mask, dtype="i4"), + decoder_position_ids=jnp.array(decoder_position_ids, dtype="i4"), + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=jnp.array(encoder_attention_mask, dtype="i4"), + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + deterministic=not train, + rngs=rngs, + mutable=mutable, + method=_decoder_forward, + ) + + # add updated cache to model output + if past_key_values is not None and return_dict: + outputs, past = outputs + outputs["past_key_values"] = unfreeze(past["cache"]) + return outputs + elif past_key_values is not None and not return_dict: + outputs, past = outputs + outputs = outputs[:1] + (unfreeze(past["cache"]),) + outputs[1:] + + return outputs + + @add_start_docstrings_to_model_forward(MARIAN_INPUTS_DOCSTRING) + def __call__( + self, + input_ids: jnp.ndarray, + attention_mask: Optional[jnp.ndarray] = None, + decoder_input_ids: Optional[jnp.ndarray] = None, + decoder_attention_mask: Optional[jnp.ndarray] = None, + position_ids: Optional[jnp.ndarray] = None, + decoder_position_ids: Optional[jnp.ndarray] = None, + output_attentions: Optional[bool] = None, + 
output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + train: bool = False, + params: dict = None, + dropout_rng: PRNGKey = None, + ): + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.return_dict + + # prepare encoder inputs + if attention_mask is None: + attention_mask = jnp.ones_like(input_ids) + if position_ids is None: + batch_size, sequence_length = input_ids.shape + position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)) + + # prepare decoder inputs + if decoder_input_ids is None: + decoder_input_ids = shift_tokens_right( + input_ids, self.config.pad_token_id, decoder_start_token_id=self.config.decoder_start_token_id + ) + if decoder_attention_mask is None: + decoder_attention_mask = jnp.ones_like(decoder_input_ids) + if decoder_position_ids is None: + batch_size, sequence_length = decoder_input_ids.shape + decoder_position_ids = jnp.broadcast_to( + jnp.arange(sequence_length)[None, :], (batch_size, sequence_length) + ) + + # Handle any PRNG if needed + rngs = {"dropout": dropout_rng} if dropout_rng is not None else {} + + return self.module.apply( + {"params": params or self.params}, + input_ids=jnp.array(input_ids, dtype="i4"), + attention_mask=jnp.array(attention_mask, dtype="i4"), + position_ids=jnp.array(position_ids, dtype="i4"), + decoder_input_ids=jnp.array(decoder_input_ids, dtype="i4"), + decoder_attention_mask=jnp.array(decoder_attention_mask, dtype="i4"), + decoder_position_ids=jnp.array(decoder_position_ids, dtype="i4"), + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + deterministic=not train, + rngs=rngs, + ) + + +@add_start_docstrings( + "The bare Marian Model transformer outputting raw hidden-states without any specific head on top.", + MARIAN_START_DOCSTRING, +) +class FlaxMarianModel(FlaxMarianPreTrainedModel): + config: MarianConfig + dtype: jnp.dtype = jnp.float32 # the dtype of the computation + module_class = FlaxMarianModule + + +append_call_sample_docstring(FlaxMarianModel, _CHECKPOINT_FOR_DOC, FlaxSeq2SeqModelOutput, _CONFIG_FOR_DOC) + + +class FlaxMarianMTModule(nn.Module): + config: MarianConfig + dtype: jnp.dtype = jnp.float32 + bias_init: Callable[..., jnp.ndarray] = jax.nn.initializers.zeros + + def setup(self): + self.model = FlaxMarianModule(config=self.config, dtype=self.dtype) + self.lm_head = nn.Dense( + self.model.shared.num_embeddings, + use_bias=False, + dtype=self.dtype, + kernel_init=jax.nn.initializers.normal(self.config.init_std), + ) + self.final_logits_bias = self.param("final_logits_bias", self.bias_init, (1, self.model.shared.num_embeddings)) + + def _get_encoder_module(self): + return self.model.encoder + + def _get_decoder_module(self): + return self.model.decoder + + def __call__( + self, + input_ids, + attention_mask, + decoder_input_ids, + decoder_attention_mask, + position_ids, + decoder_position_ids, + output_attentions: bool = False, + output_hidden_states: bool = False, + return_dict: bool = True, + deterministic: bool = True, + ): + outputs = self.model( + input_ids=input_ids, + attention_mask=attention_mask, + decoder_input_ids=decoder_input_ids, + decoder_attention_mask=decoder_attention_mask, + position_ids=position_ids, + 
decoder_position_ids=decoder_position_ids, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + deterministic=deterministic, + ) + + hidden_states = outputs[0] + + if self.config.tie_word_embeddings: + shared_embedding = self.model.variables["params"]["shared"]["embedding"] + lm_logits = self.lm_head.apply({"params": {"kernel": shared_embedding.T}}, hidden_states) + else: + lm_logits = self.lm_head(hidden_states) + + lm_logits += self.final_logits_bias.astype(self.dtype) + + if not return_dict: + output = (lm_logits,) + outputs[1:] + return output + + return FlaxSeq2SeqLMOutput( + logits=lm_logits, + decoder_hidden_states=outputs.decoder_hidden_states, + decoder_attentions=outputs.decoder_attentions, + cross_attentions=outputs.cross_attentions, + encoder_last_hidden_state=outputs.encoder_last_hidden_state, + encoder_hidden_states=outputs.encoder_hidden_states, + encoder_attentions=outputs.encoder_attentions, + ) + + +@add_start_docstrings( + "The MARIAN Model with a language modeling head. Can be used for translation.", MARIAN_START_DOCSTRING +) +class FlaxMarianMTModel(FlaxMarianPreTrainedModel): + module_class = FlaxMarianMTModule + dtype: jnp.dtype = jnp.float32 + + @add_start_docstrings(MARIAN_DECODE_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=FlaxCausalLMOutputWithCrossAttentions, config_class=MarianConfig) + def decode( + self, + decoder_input_ids, + encoder_outputs, + encoder_attention_mask: Optional[jnp.ndarray] = None, + decoder_attention_mask: Optional[jnp.ndarray] = None, + decoder_position_ids: Optional[jnp.ndarray] = None, + past_key_values: dict = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + train: bool = False, + params: dict = None, + dropout_rng: PRNGKey = None, + ): + r""" + Returns: + + Example: + + ```python + >>> import jax.numpy as jnp + >>> from transformers import AutoTokenizer, FlaxMarianMTModel + + >>> model = FlaxMarianMTModel.from_pretrained("Helsinki-NLP/opus-mt-en-de") + >>> tokenizer = AutoTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-de") + + >>> text = "My friends are cool but they eat too many carbs." 
+ >>> inputs = tokenizer(text, max_length=64, return_tensors="jax") + >>> encoder_outputs = model.encode(**inputs) + + >>> decoder_start_token_id = model.config.decoder_start_token_id + >>> decoder_input_ids = jnp.ones((inputs.input_ids.shape[0], 1), dtype="i4") * decoder_start_token_id + + >>> outputs = model.decode(decoder_input_ids, encoder_outputs) + >>> logits = outputs.logits + ```""" + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.return_dict + + encoder_hidden_states = encoder_outputs[0] + if encoder_attention_mask is None: + batch_size, sequence_length = encoder_hidden_states.shape[:2] + encoder_attention_mask = jnp.ones((batch_size, sequence_length)) + + batch_size, sequence_length = decoder_input_ids.shape + if decoder_attention_mask is None: + decoder_attention_mask = jnp.ones((batch_size, sequence_length)) + + if decoder_position_ids is None: + if past_key_values is not None: + raise ValueError("Make sure to provide `decoder_position_ids` when passing `past_key_values`.") + + decoder_position_ids = jnp.broadcast_to( + jnp.arange(sequence_length)[None, :], (batch_size, sequence_length) + ) + + # Handle any PRNG if needed + rngs = {} + if dropout_rng is not None: + rngs["dropout"] = dropout_rng + + inputs = {"params": params or self.params} + + # if past_key_values are passed then cache is already initialized a private flag init_cache has to be + # passed down to ensure cache is used. It has to be made sure that cache is marked as mutable so that + # it can be changed by FlaxMarianAttention module + if past_key_values: + inputs["cache"] = past_key_values + mutable = ["cache"] + else: + mutable = False + + def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs): + decoder_module = module._get_decoder_module() + outputs = decoder_module( + decoder_input_ids, + decoder_attention_mask, + decoder_position_ids, + **kwargs, + ) + hidden_states = outputs[0] + + if self.config.tie_word_embeddings: + shared_embedding = module.model.variables["params"]["shared"]["embedding"] + lm_logits = module.lm_head.apply({"params": {"kernel": shared_embedding.T}}, hidden_states) + else: + lm_logits = module.lm_head(hidden_states) + lm_logits += module.final_logits_bias.astype(self.dtype) + + return lm_logits, outputs + + outputs = self.module.apply( + inputs, + decoder_input_ids=jnp.array(decoder_input_ids, dtype="i4"), + decoder_attention_mask=jnp.array(decoder_attention_mask, dtype="i4"), + decoder_position_ids=jnp.array(decoder_position_ids, dtype="i4"), + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=jnp.array(encoder_attention_mask, dtype="i4"), + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + deterministic=not train, + rngs=rngs, + mutable=mutable, + method=_decoder_forward, + ) + + if past_key_values is None: + lm_logits, decoder_outputs = outputs + else: + (lm_logits, decoder_outputs), past = outputs + + if return_dict: + outputs = FlaxCausalLMOutputWithCrossAttentions( + logits=lm_logits, + hidden_states=decoder_outputs.hidden_states, + attentions=decoder_outputs.attentions, + cross_attentions=decoder_outputs.cross_attentions, + ) + else: + outputs = (lm_logits,) + decoder_outputs[1:] + + # add updated 
cache to model output + if past_key_values is not None and return_dict: + outputs["past_key_values"] = unfreeze(past["cache"]) + return outputs + elif past_key_values is not None and not return_dict: + outputs = outputs[:1] + (unfreeze(past["cache"]),) + outputs[1:] + + return outputs + + def _adapt_logits_for_beam_search(self, logits): + """This function enforces the padding token never to be generated.""" + logits = logits.at[:, :, self.config.pad_token_id].set(float("-inf")) + return logits + + def prepare_inputs_for_generation( + self, + decoder_input_ids, + max_length, + attention_mask: Optional[jax.Array] = None, + decoder_attention_mask: Optional[jax.Array] = None, + encoder_outputs=None, + **kwargs, + ): + # initializing the cache + batch_size, seq_length = decoder_input_ids.shape + + past_key_values = self.init_cache(batch_size, max_length, encoder_outputs) + # Note that usually one would have to put 0's in the attention_mask for x > input_ids.shape[-1] and x < cache_length. + # But since the decoder uses a causal mask, those positions are masked anyways. + # Thus we can create a single static attention_mask here, which is more efficient for compilation + extended_attention_mask = jnp.ones((batch_size, max_length), dtype="i4") + if decoder_attention_mask is not None: + position_ids = decoder_attention_mask.cumsum(axis=-1) - 1 + extended_attention_mask = lax.dynamic_update_slice(extended_attention_mask, decoder_attention_mask, (0, 0)) + else: + position_ids = jnp.broadcast_to(jnp.arange(seq_length, dtype="i4")[None, :], (batch_size, seq_length)) + + return { + "past_key_values": past_key_values, + "encoder_outputs": encoder_outputs, + "encoder_attention_mask": attention_mask, + "decoder_attention_mask": extended_attention_mask, + "decoder_position_ids": position_ids, + } + + def update_inputs_for_generation(self, model_outputs, model_kwargs): + model_kwargs["past_key_values"] = model_outputs.past_key_values + model_kwargs["decoder_position_ids"] = model_kwargs["decoder_position_ids"][:, -1:] + 1 + return model_kwargs + + +FLAX_MARIAN_MT_DOCSTRING = """ + Returns: + + Example: + + ```python + >>> from transformers import AutoTokenizer, FlaxMarianMTModel + + >>> model = FlaxMarianMTModel.from_pretrained("Helsinki-NLP/opus-mt-en-de") + >>> tokenizer = AutoTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-de") + + >>> text = "My friends are cool but they eat too many carbs." + >>> input_ids = tokenizer(text, max_length=64, return_tensors="jax").input_ids + + >>> sequences = model.generate(input_ids, max_length=64, num_beams=2).sequences + + >>> outputs = tokenizer.batch_decode(sequences, skip_special_tokens=True) + >>> # should give *Meine Freunde sind cool, aber sie essen zu viele Kohlenhydrate.* + ``` +""" + +overwrite_call_docstring( + FlaxMarianMTModel, + MARIAN_INPUTS_DOCSTRING + FLAX_MARIAN_MT_DOCSTRING, +) +append_replace_return_docstrings(FlaxMarianMTModel, output_type=FlaxSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC) diff --git a/venv/lib/python3.10/site-packages/transformers/models/marian/modeling_marian.py b/venv/lib/python3.10/site-packages/transformers/models/marian/modeling_marian.py new file mode 100644 index 0000000000000000000000000000000000000000..10d7f1b6b2d16dde3e70fa2cae270f972e37f862 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/marian/modeling_marian.py @@ -0,0 +1,1719 @@ +# coding=utf-8 +# Copyright 2021 The Marian Team Authors and The HuggingFace Inc. team. All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""PyTorch MarianMTModel model, ported from the Marian C++ repo.""" + + +import copy +import math +from typing import Dict, List, Optional, Tuple, Union + +import numpy as np +import torch +import torch.utils.checkpoint +from torch import nn +from torch.nn import CrossEntropyLoss + +from ...activations import ACT2FN +from ...modeling_attn_mask_utils import _prepare_4d_attention_mask, _prepare_4d_causal_attention_mask +from ...modeling_outputs import ( + BaseModelOutput, + BaseModelOutputWithPastAndCrossAttentions, + CausalLMOutputWithCrossAttentions, + Seq2SeqLMOutput, + Seq2SeqModelOutput, +) +from ...modeling_utils import PreTrainedModel +from ...utils import ( + add_end_docstrings, + add_start_docstrings, + add_start_docstrings_to_model_forward, + logging, + replace_return_docstrings, +) +from .configuration_marian import MarianConfig + + +logger = logging.get_logger(__name__) + +_CONFIG_FOR_DOC = "MarianConfig" +_CHECKPOINT_FOR_DOC = "Helsinki-NLP/opus-mt-en-de" + + +# Copied from transformers.models.bart.modeling_bart.shift_tokens_right +def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int): + """ + Shift input ids one token to the right. + """ + shifted_input_ids = input_ids.new_zeros(input_ids.shape) + shifted_input_ids[:, 1:] = input_ids[:, :-1].clone() + shifted_input_ids[:, 0] = decoder_start_token_id + + if pad_token_id is None: + raise ValueError("self.model.config.pad_token_id has to be defined.") + # replace possible -100 values in labels by `pad_token_id` + shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id) + + return shifted_input_ids + + +class MarianSinusoidalPositionalEmbedding(nn.Embedding): + """This module produces sinusoidal positional embeddings of any length.""" + + def __init__(self, num_positions: int, embedding_dim: int, padding_idx: Optional[int] = None) -> None: + super().__init__(num_positions, embedding_dim) + self.weight = self._init_weight(self.weight) + + @staticmethod + def _init_weight(out: nn.Parameter) -> nn.Parameter: + """ + Identical to the XLM create_sinusoidal_embeddings except features are not interleaved. The cos features are in + the 2nd half of the vector. 
[dim // 2:] + """ + n_pos, dim = out.shape + position_enc = np.array( + [[pos / np.power(10000, 2 * (j // 2) / dim) for j in range(dim)] for pos in range(n_pos)] + ) + out.requires_grad = False # set early to avoid an error in pytorch-1.8+ + sentinel = dim // 2 if dim % 2 == 0 else (dim // 2) + 1 + out[:, 0:sentinel] = torch.FloatTensor(np.sin(position_enc[:, 0::2])) + out[:, sentinel:] = torch.FloatTensor(np.cos(position_enc[:, 1::2])) + out.detach_() + return out + + @torch.no_grad() + def forward(self, input_ids_shape: torch.Size, past_key_values_length: int = 0) -> torch.Tensor: + """`input_ids_shape` is expected to be [bsz x seqlen].""" + bsz, seq_len = input_ids_shape[:2] + positions = torch.arange( + past_key_values_length, past_key_values_length + seq_len, dtype=torch.long, device=self.weight.device + ) + return super().forward(positions) + + +# Copied from transformers.models.bart.modeling_bart.BartAttention with Bart->Marian +class MarianAttention(nn.Module): + """Multi-headed attention from 'Attention Is All You Need' paper""" + + def __init__( + self, + embed_dim: int, + num_heads: int, + dropout: float = 0.0, + is_decoder: bool = False, + bias: bool = True, + is_causal: bool = False, + config: Optional[MarianConfig] = None, + ): + super().__init__() + self.embed_dim = embed_dim + self.num_heads = num_heads + self.dropout = dropout + self.head_dim = embed_dim // num_heads + self.config = config + + if (self.head_dim * num_heads) != self.embed_dim: + raise ValueError( + f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}" + f" and `num_heads`: {num_heads})." + ) + self.scaling = self.head_dim**-0.5 + self.is_decoder = is_decoder + self.is_causal = is_causal + + self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias) + self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) + self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias) + self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) + + def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): + return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() + + def forward( + self, + hidden_states: torch.Tensor, + key_value_states: Optional[torch.Tensor] = None, + past_key_value: Optional[Tuple[torch.Tensor]] = None, + attention_mask: Optional[torch.Tensor] = None, + layer_head_mask: Optional[torch.Tensor] = None, + output_attentions: bool = False, + ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: + """Input shape: Batch x Time x Channel""" + + # if key_value_states are provided this layer is used as a cross-attention layer + # for the decoder + is_cross_attention = key_value_states is not None + + bsz, tgt_len, _ = hidden_states.size() + + # get query proj + query_states = self.q_proj(hidden_states) * self.scaling + # get key, value proj + # `past_key_value[0].shape[2] == key_value_states.shape[1]` + # is checking that the `sequence_length` of the `past_key_value` is the same as + # the provided `key_value_states` to support prefix tuning + if ( + is_cross_attention + and past_key_value is not None + and past_key_value[0].shape[2] == key_value_states.shape[1] + ): + # reuse k,v, cross_attentions + key_states = past_key_value[0] + value_states = past_key_value[1] + elif is_cross_attention: + # cross_attentions + key_states = self._shape(self.k_proj(key_value_states), -1, bsz) + value_states = self._shape(self.v_proj(key_value_states), -1, bsz) + elif past_key_value is not None: + # reuse k, v, self_attention + key_states = 
self._shape(self.k_proj(hidden_states), -1, bsz) + value_states = self._shape(self.v_proj(hidden_states), -1, bsz) + key_states = torch.cat([past_key_value[0], key_states], dim=2) + value_states = torch.cat([past_key_value[1], value_states], dim=2) + else: + # self_attention + key_states = self._shape(self.k_proj(hidden_states), -1, bsz) + value_states = self._shape(self.v_proj(hidden_states), -1, bsz) + + if self.is_decoder: + # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. + # Further calls to cross_attention layer can then reuse all cross-attention + # key/value_states (first "if" case) + # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of + # all previous decoder key/value_states. Further calls to uni-directional self-attention + # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) + # if encoder bi-directional self-attention `past_key_value` is always `None` + past_key_value = (key_states, value_states) + + proj_shape = (bsz * self.num_heads, -1, self.head_dim) + query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape) + key_states = key_states.reshape(*proj_shape) + value_states = value_states.reshape(*proj_shape) + + src_len = key_states.size(1) + attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) + + if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len): + raise ValueError( + f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is" + f" {attn_weights.size()}" + ) + + if attention_mask is not None: + if attention_mask.size() != (bsz, 1, tgt_len, src_len): + raise ValueError( + f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}" + ) + attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask + attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) + + attn_weights = nn.functional.softmax(attn_weights, dim=-1) + + if layer_head_mask is not None: + if layer_head_mask.size() != (self.num_heads,): + raise ValueError( + f"Head mask for a single layer should be of size {(self.num_heads,)}, but is" + f" {layer_head_mask.size()}" + ) + attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) + + if output_attentions: + # this operation is a bit awkward, but it's required to + # make sure that attn_weights keeps its gradient. 
+ # In order to do so, attn_weights have to be reshaped + # twice and have to be reused in the following + attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len) + else: + attn_weights_reshaped = None + + attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) + + attn_output = torch.bmm(attn_probs, value_states) + + if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim): + raise ValueError( + f"`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is" + f" {attn_output.size()}" + ) + + attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim) + attn_output = attn_output.transpose(1, 2) + + # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be + # partitioned across GPUs when using tensor-parallelism. + attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim) + + attn_output = self.out_proj(attn_output) + + return attn_output, attn_weights_reshaped, past_key_value + + +# Copied from transformers.models.bart.modeling_bart.BartEncoderLayer with Bart->Marian, BART->MARIAN +class MarianEncoderLayer(nn.Module): + def __init__(self, config: MarianConfig): + super().__init__() + self.embed_dim = config.d_model + + self.self_attn = MARIAN_ATTENTION_CLASSES[config._attn_implementation]( + embed_dim=self.embed_dim, + num_heads=config.encoder_attention_heads, + dropout=config.attention_dropout, + config=config, + ) + self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) + self.dropout = config.dropout + self.activation_fn = ACT2FN[config.activation_function] + self.activation_dropout = config.activation_dropout + self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim) + self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim) + self.final_layer_norm = nn.LayerNorm(self.embed_dim) + + def forward( + self, + hidden_states: torch.FloatTensor, + attention_mask: torch.FloatTensor, + layer_head_mask: torch.FloatTensor, + output_attentions: Optional[bool] = False, + ) -> Tuple[torch.FloatTensor, Optional[torch.FloatTensor]]: + """ + Args: + hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` + attention_mask (`torch.FloatTensor`): attention mask of size + `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. + layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size + `(encoder_attention_heads,)`. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more detail. 
+ """ + residual = hidden_states + hidden_states, attn_weights, _ = self.self_attn( + hidden_states=hidden_states, + attention_mask=attention_mask, + layer_head_mask=layer_head_mask, + output_attentions=output_attentions, + ) + hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) + hidden_states = residual + hidden_states + hidden_states = self.self_attn_layer_norm(hidden_states) + + residual = hidden_states + hidden_states = self.activation_fn(self.fc1(hidden_states)) + hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) + hidden_states = self.fc2(hidden_states) + hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) + hidden_states = residual + hidden_states + hidden_states = self.final_layer_norm(hidden_states) + + if hidden_states.dtype == torch.float16 and ( + torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any() + ): + clamp_value = torch.finfo(hidden_states.dtype).max - 1000 + hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) + + outputs = (hidden_states,) + + if output_attentions: + outputs += (attn_weights,) + + return outputs + + +MARIAN_ATTENTION_CLASSES = {"eager": MarianAttention} + + +# Copied from transformers.models.bart.modeling_bart.BartDecoderLayer with Bart->Marian, BART->MARIAN +class MarianDecoderLayer(nn.Module): + def __init__(self, config: MarianConfig): + super().__init__() + self.embed_dim = config.d_model + + self.self_attn = MARIAN_ATTENTION_CLASSES[config._attn_implementation]( + embed_dim=self.embed_dim, + num_heads=config.decoder_attention_heads, + dropout=config.attention_dropout, + is_decoder=True, + is_causal=True, + config=config, + ) + self.dropout = config.dropout + self.activation_fn = ACT2FN[config.activation_function] + self.activation_dropout = config.activation_dropout + + self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) + self.encoder_attn = MARIAN_ATTENTION_CLASSES[config._attn_implementation]( + self.embed_dim, + config.decoder_attention_heads, + dropout=config.attention_dropout, + is_decoder=True, + config=config, + ) + self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim) + self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim) + self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim) + self.final_layer_norm = nn.LayerNorm(self.embed_dim) + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + encoder_hidden_states: Optional[torch.Tensor] = None, + encoder_attention_mask: Optional[torch.Tensor] = None, + layer_head_mask: Optional[torch.Tensor] = None, + cross_attn_layer_head_mask: Optional[torch.Tensor] = None, + past_key_value: Optional[Tuple[torch.Tensor]] = None, + output_attentions: Optional[bool] = False, + use_cache: Optional[bool] = True, + ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]: + """ + Args: + hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` + attention_mask (`torch.FloatTensor`): attention mask of size + `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. 
+ encoder_hidden_states (`torch.FloatTensor`): + cross attention input to the layer of shape `(batch, seq_len, embed_dim)` + encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size + `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. + layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size + `(encoder_attention_heads,)`. + cross_attn_layer_head_mask (`torch.FloatTensor`): mask for cross-attention heads in a given layer of + size `(decoder_attention_heads,)`. + past_key_value (`Tuple(torch.FloatTensor)`): cached past key and value projection states + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more detail. + """ + residual = hidden_states + + # Self Attention + # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 + self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None + # add present self-attn cache to positions 1,2 of present_key_value tuple + hidden_states, self_attn_weights, present_key_value = self.self_attn( + hidden_states=hidden_states, + past_key_value=self_attn_past_key_value, + attention_mask=attention_mask, + layer_head_mask=layer_head_mask, + output_attentions=output_attentions, + ) + hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) + hidden_states = residual + hidden_states + hidden_states = self.self_attn_layer_norm(hidden_states) + + # Cross-Attention Block + cross_attn_present_key_value = None + cross_attn_weights = None + if encoder_hidden_states is not None: + residual = hidden_states + + # cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple + cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None + hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn( + hidden_states=hidden_states, + key_value_states=encoder_hidden_states, + attention_mask=encoder_attention_mask, + layer_head_mask=cross_attn_layer_head_mask, + past_key_value=cross_attn_past_key_value, + output_attentions=output_attentions, + ) + hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) + hidden_states = residual + hidden_states + hidden_states = self.encoder_attn_layer_norm(hidden_states) + + # add cross-attn to positions 3,4 of present_key_value tuple + present_key_value = present_key_value + cross_attn_present_key_value + + # Fully Connected + residual = hidden_states + hidden_states = self.activation_fn(self.fc1(hidden_states)) + hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) + hidden_states = self.fc2(hidden_states) + hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) + hidden_states = residual + hidden_states + hidden_states = self.final_layer_norm(hidden_states) + + outputs = (hidden_states,) + + if output_attentions: + outputs += (self_attn_weights, cross_attn_weights) + + if use_cache: + outputs += (present_key_value,) + + return outputs + + +class MarianPreTrainedModel(PreTrainedModel): + config_class = MarianConfig + base_model_prefix = "model" + supports_gradient_checkpointing = True + + def _init_weights(self, module: Union[nn.Linear, nn.Embedding, MarianSinusoidalPositionalEmbedding]): + std = self.config.init_std + if isinstance(module, 
nn.Linear): + module.weight.data.normal_(mean=0.0, std=std) + if module.bias is not None: + module.bias.data.zero_() + elif isinstance(module, MarianSinusoidalPositionalEmbedding): + pass + elif isinstance(module, nn.Embedding): + module.weight.data.normal_(mean=0.0, std=std) + if module.padding_idx is not None: + module.weight.data[module.padding_idx].zero_() + + @property + def dummy_inputs(self): + pad_token = self.config.pad_token_id + input_ids = torch.tensor([[0, 6, 10, 4, 2], [0, 8, 12, 2, pad_token]], device=self.device) + dummy_inputs = { + "attention_mask": input_ids.ne(pad_token), + "input_ids": input_ids, + "decoder_input_ids": input_ids, + } + return dummy_inputs + + +MARIAN_START_DOCSTRING = r""" + This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the + library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads + etc.) + + This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. + Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage + and behavior. + + Parameters: + config ([`MarianConfig`]): + Model configuration class with all the parameters of the model. Initializing with a config file does not + load the weights associated with the model, only the configuration. Check out the + [`~PreTrainedModel.from_pretrained`] method to load the model weights. +""" + +MARIAN_GENERATION_EXAMPLE = r""" + Pytorch version of marian-nmt's transformer.h (c++). Designed for the OPUS-NMT translation checkpoints. Available + models are listed [here](https://huggingface.co/models?search=Helsinki-NLP). + + Examples: + + ```python + >>> from transformers import AutoTokenizer, MarianMTModel + + >>> src = "fr" # source language + >>> trg = "en" # target language + + >>> model_name = f"Helsinki-NLP/opus-mt-{src}-{trg}" + >>> model = MarianMTModel.from_pretrained(model_name) + >>> tokenizer = AutoTokenizer.from_pretrained(model_name) + + >>> sample_text = "où est l'arrêt de bus ?" + >>> batch = tokenizer([sample_text], return_tensors="pt") + + >>> generated_ids = model.generate(**batch) + >>> tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0] + "Where's the bus stop?" + ``` +""" + +MARIAN_INPUTS_DOCSTRING = r""" + Args: + input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): + Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide + it. + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input-ids) + attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): + Indices of decoder input sequence tokens in the vocabulary. + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. 
+ + [What are decoder input IDs?](../glossary#decoder-input-ids) + + Marian uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If + `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see + `past_key_values`). + decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): + Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also + be used by default. + head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*): + Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`: + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + + decoder_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): + Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in `[0, 1]`: + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + + cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): + Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in `[0, + 1]`: + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + + encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*): + Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`) + `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) is a sequence of + hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. + past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): + Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape + `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape + `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. + + Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention + blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. + + If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that + don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all + `decoder_input_ids` of shape `(batch_size, sequence_length)`. + inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This + is useful if you want more control over how to convert `input_ids` indices into associated vectors than the + model's internal embedding lookup matrix. + decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*): + Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded + representation. If `past_key_values` is used, optionally only the last `decoder_inputs_embeds` have to be + input (see `past_key_values`). 
This is useful if you want more control over how to convert + `decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix. + + If `decoder_input_ids` and `decoder_inputs_embeds` are both unset, `decoder_inputs_embeds` takes the value + of `inputs_embeds`. + use_cache (`bool`, *optional*): + If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see + `past_key_values`). + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. +""" + + +class MarianEncoder(MarianPreTrainedModel): + """ + Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a + [`MarianEncoderLayer`]. + + Args: + config: MarianConfig + embed_tokens (nn.Embedding): output embedding + """ + + def __init__(self, config: MarianConfig, embed_tokens: Optional[nn.Embedding] = None): + super().__init__(config) + + self.dropout = config.dropout + self.layerdrop = config.encoder_layerdrop + + embed_dim = config.d_model + self.padding_idx = config.pad_token_id + self.max_source_positions = config.max_position_embeddings + self.embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0 + + if embed_tokens is not None: + self.embed_tokens = embed_tokens + else: + self.embed_tokens = nn.Embedding(config.vocab_size, embed_dim, self.padding_idx) + + self.embed_positions = MarianSinusoidalPositionalEmbedding( + config.max_position_embeddings, embed_dim, self.padding_idx + ) + self.layers = nn.ModuleList([MarianEncoderLayer(config) for _ in range(config.encoder_layers)]) + + self.gradient_checkpointing = False + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.embed_tokens + + def set_input_embeddings(self, value): + self.embed_tokens = value + + def forward( + self, + input_ids: torch.LongTensor = None, + attention_mask: Optional[torch.LongTensor] = None, + head_mask: Optional[torch.Tensor] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple[torch.Tensor], BaseModelOutput]: + r""" + Args: + input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): + Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you + provide it. + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input-ids) + attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*): + Mask to nullify selected heads of the attention modules. 
Mask values selected in `[0, 1]`: + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + + inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. + This is useful if you want more control over how to convert `input_ids` indices into associated vectors + than the model's internal embedding lookup matrix. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors + for more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. + """ + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + # retrieve input_ids and inputs_embeds + if input_ids is not None and inputs_embeds is not None: + raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") + elif input_ids is not None: + self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) + input_shape = input_ids.size() + input_ids = input_ids.view(-1, input_shape[-1]) + elif inputs_embeds is not None: + input_shape = inputs_embeds.size()[:-1] + else: + raise ValueError("You have to specify either input_ids or inputs_embeds") + + if inputs_embeds is None: + inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale + + embed_pos = self.embed_positions(input_shape) + + hidden_states = inputs_embeds + embed_pos + hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) + + # expand attention_mask + if attention_mask is not None: + # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] + attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype) + + encoder_states = () if output_hidden_states else None + all_attentions = () if output_attentions else None + + # check if head_mask has a correct number of layers specified if desired + if head_mask is not None: + assert head_mask.size()[0] == ( + len(self.layers) + ), f"The head_mask should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}." 
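+        # Note (editorial comment, not part of the upstream file): the loop below runs the
+        # encoder layers in sequence. During training, LayerDrop (https://arxiv.org/abs/1909.11556)
+        # draws a random number per layer and skips the whole layer when it falls below
+        # `self.layerdrop` (e.g. with `encoder_layerdrop=0.1`, each layer is dropped on roughly
+        # 10% of training steps). With gradient checkpointing enabled, kept layers are re-run
+        # during the backward pass to trade compute for activation memory.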
+ for idx, encoder_layer in enumerate(self.layers): + if output_hidden_states: + encoder_states = encoder_states + (hidden_states,) + # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) + to_drop = False + if self.training: + dropout_probability = torch.rand([]) + if dropout_probability < self.layerdrop: # skip the layer + to_drop = True + + if to_drop: + layer_outputs = (None, None) + else: + if self.gradient_checkpointing and self.training: + layer_outputs = self._gradient_checkpointing_func( + encoder_layer.__call__, + hidden_states, + attention_mask, + (head_mask[idx] if head_mask is not None else None), + output_attentions, + ) + else: + layer_outputs = encoder_layer( + hidden_states, + attention_mask, + layer_head_mask=(head_mask[idx] if head_mask is not None else None), + output_attentions=output_attentions, + ) + + hidden_states = layer_outputs[0] + + if output_attentions: + all_attentions = all_attentions + (layer_outputs[1],) + + if output_hidden_states: + encoder_states = encoder_states + (hidden_states,) + + if not return_dict: + return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None) + return BaseModelOutput( + last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions + ) + + +class MarianDecoder(MarianPreTrainedModel): + """ + Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`MarianDecoderLayer`] + + Args: + config: MarianConfig + embed_tokens (nn.Embedding): output embedding + """ + + def __init__(self, config: MarianConfig, embed_tokens: Optional[nn.Embedding] = None): + super().__init__(config) + self.dropout = config.dropout + self.layerdrop = config.decoder_layerdrop + self.padding_idx = config.pad_token_id + self.max_target_positions = config.max_position_embeddings + self.embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0 + + if embed_tokens is not None: + self.embed_tokens = embed_tokens + else: + self.embed_tokens = nn.Embedding(config.decoder_vocab_size, config.d_model, self.padding_idx) + + self.embed_positions = MarianSinusoidalPositionalEmbedding( + config.max_position_embeddings, config.d_model, self.padding_idx + ) + self.layers = nn.ModuleList([MarianDecoderLayer(config) for _ in range(config.decoder_layers)]) + + self.gradient_checkpointing = False + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.embed_tokens + + def set_input_embeddings(self, value): + self.embed_tokens = value + + def forward( + self, + input_ids: torch.LongTensor = None, + attention_mask: Optional[torch.Tensor] = None, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + encoder_attention_mask: Optional[torch.LongTensor] = None, + head_mask: Optional[torch.Tensor] = None, + cross_attn_head_mask: Optional[torch.Tensor] = None, + past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]: + r""" + Args: + input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): + Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you + provide it. + + Indices can be obtained using [`AutoTokenizer`]. 
See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input-ids) + attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*): + Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention + of the decoder. + encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*): + Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values + selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): + Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + + cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): + Mask to nullify selected heads of the cross-attention modules in the decoder to avoid performing + cross-attention on hidden heads. Mask values selected in `[0, 1]`: + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + + past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): + Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of + shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of + shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. + + Contains pre-computed hidden-states (key and values in the self-attention blocks and in the + cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. + + If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those + that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of + all `decoder_input_ids` of shape `(batch_size, sequence_length)`. + inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. + This is useful if you want more control over how to convert `input_ids` indices into associated vectors + than the model's internal embedding lookup matrix. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors + for more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
+ """ + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + use_cache = use_cache if use_cache is not None else self.config.use_cache + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + # retrieve input_ids and inputs_embeds + if input_ids is not None and inputs_embeds is not None: + raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time") + elif input_ids is not None: + input_shape = input_ids.size() + input_ids = input_ids.view(-1, input_shape[-1]) + elif inputs_embeds is not None: + input_shape = inputs_embeds.size()[:-1] + else: + raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds") + + # past_key_values_length + past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 + + if inputs_embeds is None: + inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale + + attention_mask = _prepare_4d_causal_attention_mask( + attention_mask, input_shape, inputs_embeds, past_key_values_length + ) + + # expand encoder attention mask + if encoder_hidden_states is not None and encoder_attention_mask is not None: + # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] + encoder_attention_mask = _prepare_4d_attention_mask( + encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1] + ) + + # embed positions + positions = self.embed_positions(input_shape, past_key_values_length) + + hidden_states = inputs_embeds + positions + + hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) + + if self.gradient_checkpointing and self.training: + if use_cache: + logger.warning_once( + "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." + ) + use_cache = False + + # decoder layers + all_hidden_states = () if output_hidden_states else None + all_self_attns = () if output_attentions else None + all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None + next_decoder_cache = () if use_cache else None + + # check if head_mask/cross_attn_head_mask has a correct number of layers specified if desired + for attn_mask, mask_name in zip([head_mask, cross_attn_head_mask], ["head_mask", "cross_attn_head_mask"]): + if attn_mask is not None: + assert attn_mask.size()[0] == (len(self.layers)), ( + f"The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for" + f" {head_mask.size()[0]}." 
+ ) + for idx, decoder_layer in enumerate(self.layers): + # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) + if output_hidden_states: + all_hidden_states += (hidden_states,) + if self.training: + dropout_probability = torch.rand([]) + if dropout_probability < self.layerdrop: + continue + + past_key_value = past_key_values[idx] if past_key_values is not None else None + + if self.gradient_checkpointing and self.training: + layer_outputs = self._gradient_checkpointing_func( + decoder_layer.__call__, + hidden_states, + attention_mask, + encoder_hidden_states, + encoder_attention_mask, + head_mask[idx] if head_mask is not None else None, + cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None, + None, + output_attentions, + use_cache, + ) + else: + layer_outputs = decoder_layer( + hidden_states, + attention_mask=attention_mask, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_attention_mask, + layer_head_mask=(head_mask[idx] if head_mask is not None else None), + cross_attn_layer_head_mask=( + cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None + ), + past_key_value=past_key_value, + output_attentions=output_attentions, + use_cache=use_cache, + ) + hidden_states = layer_outputs[0] + + if use_cache: + next_decoder_cache += (layer_outputs[3 if output_attentions else 1],) + + if output_attentions: + all_self_attns += (layer_outputs[1],) + + if encoder_hidden_states is not None: + all_cross_attentions += (layer_outputs[2],) + + # add hidden states from the last decoder layer + if output_hidden_states: + all_hidden_states += (hidden_states,) + + next_cache = next_decoder_cache if use_cache else None + if not return_dict: + return tuple( + v + for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_cross_attentions] + if v is not None + ) + return BaseModelOutputWithPastAndCrossAttentions( + last_hidden_state=hidden_states, + past_key_values=next_cache, + hidden_states=all_hidden_states, + attentions=all_self_attns, + cross_attentions=all_cross_attentions, + ) + + +@add_start_docstrings( + "The bare Marian Model outputting raw hidden-states without any specific head on top.", MARIAN_START_DOCSTRING +) +class MarianModel(MarianPreTrainedModel): + _tied_weights_keys = ["encoder.embed_tokens.weight", "decoder.embed_tokens.weight"] + + def __init__(self, config: MarianConfig): + super().__init__(config) + + padding_idx, vocab_size = config.pad_token_id, config.vocab_size + + # We always use self.shared for token embeddings to ensure compatibility with all marian models + self.shared = nn.Embedding(vocab_size, config.d_model, padding_idx) + if self.config.share_encoder_decoder_embeddings: + encoder_embed_tokens = decoder_embed_tokens = self.shared + else: + # Since the embeddings are not shared, deepcopy the embeddings here for encoder + # and decoder to make sure they are not tied. + encoder_embed_tokens = copy.deepcopy(self.shared) + decoder_embed_tokens = copy.deepcopy(self.shared) + self.shared = None + + self.encoder = MarianEncoder(config, encoder_embed_tokens) + self.decoder = MarianDecoder(config, decoder_embed_tokens) + + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + # This will return shared embeddings if they are shared else specific to encoder. 
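+        # Note (editorial comment, not part of the upstream file): when
+        # `config.share_encoder_decoder_embeddings` is True, `self.shared`,
+        # `self.encoder.embed_tokens` and `self.decoder.embed_tokens` are the same
+        # `nn.Embedding` instance, so this returns the shared table; otherwise the encoder
+        # and decoder hold independent (deep-copied) tables and only the encoder's is
+        # returned here; use `get_decoder_input_embeddings` for the decoder's.
+        # Illustrative usage (checkpoint name taken from the docstrings in this file):
+        #   model = MarianModel.from_pretrained("Helsinki-NLP/opus-mt-en-de")
+        #   embeddings = model.get_input_embeddings()  # nn.Embedding(vocab_size, d_model)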
+ return self.get_encoder().get_input_embeddings() + + def set_input_embeddings(self, value): + if self.config.share_encoder_decoder_embeddings: + self.shared = value + self.encoder.embed_tokens = self.shared + self.decoder.embed_tokens = self.shared + else: # if not shared only set encoder embeedings + self.encoder.embed_tokens = value + + def get_decoder_input_embeddings(self): + if self.config.share_encoder_decoder_embeddings: + raise ValueError( + "`get_decoder_input_embeddings` should not be called if `config.share_encoder_decoder_embeddings` " + "is `True`. Please use `get_input_embeddings` instead." + ) + return self.get_decoder().get_input_embeddings() + + def set_decoder_input_embeddings(self, value): + if self.config.share_encoder_decoder_embeddings: + raise ValueError( + "`config.share_encoder_decoder_embeddings` is set to `True` meaning the decoder input embeddings " + "are shared with the encoder. In order to set the decoder input embeddings, you should simply set " + "the encoder input embeddings by calling `set_input_embeddings` with the appropriate embeddings." + ) + self.decoder.embed_tokens = value + + def get_encoder(self): + return self.encoder + + def get_decoder(self): + return self.decoder + + def resize_decoder_token_embeddings(self, new_num_tokens: int) -> nn.Embedding: + if self.config.share_encoder_decoder_embeddings: + raise ValueError( + "`resize_decoder_token_embeddings` should not be called if `config.share_encoder_decoder_embeddings` " + "is `True`. Please use `resize_token_embeddings` instead." + ) + + old_embeddings = self.get_decoder_input_embeddings() + new_embeddings = self._get_resized_embeddings(old_embeddings, new_num_tokens) + self.set_decoder_input_embeddings(new_embeddings) + + model_embeds = self.get_decoder_input_embeddings() + + if new_num_tokens is None: + return model_embeds + + # Update base model and current model config + self.config.decoder_vocab_size = new_num_tokens + + # Tie weights again if needed + self.tie_weights() + + return model_embeds + + @add_start_docstrings_to_model_forward(MARIAN_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=Seq2SeqModelOutput, config_class=_CONFIG_FOR_DOC) + def forward( + self, + input_ids: torch.LongTensor = None, + attention_mask: Optional[torch.Tensor] = None, + decoder_input_ids: Optional[torch.LongTensor] = None, + decoder_attention_mask: Optional[torch.Tensor] = None, + head_mask: Optional[torch.Tensor] = None, + decoder_head_mask: Optional[torch.Tensor] = None, + cross_attn_head_mask: Optional[torch.Tensor] = None, + encoder_outputs: Optional[Union[Tuple[torch.Tensor], BaseModelOutput]] = None, + past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + decoder_inputs_embeds: Optional[torch.FloatTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Seq2SeqModelOutput: + r""" + Returns: + + Example: + + ```python + >>> from transformers import AutoTokenizer, MarianModel + + >>> tokenizer = AutoTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-de") + >>> model = MarianModel.from_pretrained("Helsinki-NLP/opus-mt-en-de") + + >>> inputs = tokenizer("Studies have been shown that owning a dog is good for you", return_tensors="pt") + >>> decoder_inputs = tokenizer( + ... " Studien haben gezeigt dass es hilfreich ist einen Hund zu besitzen", + ... return_tensors="pt", + ... 
add_special_tokens=False, + ... ) + >>> outputs = model(input_ids=inputs.input_ids, decoder_input_ids=decoder_inputs.input_ids) + + >>> last_hidden_states = outputs.last_hidden_state + >>> list(last_hidden_states.shape) + [1, 26, 512] + ```""" + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + use_cache = use_cache if use_cache is not None else self.config.use_cache + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if encoder_outputs is None: + encoder_outputs = self.encoder( + input_ids=input_ids, + attention_mask=attention_mask, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True + elif return_dict and not isinstance(encoder_outputs, BaseModelOutput): + encoder_outputs = BaseModelOutput( + last_hidden_state=encoder_outputs[0], + hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, + attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, + ) + + # decoder outputs consists of (dec_features, past_key_value, dec_hidden, dec_attn) + decoder_outputs = self.decoder( + input_ids=decoder_input_ids, + attention_mask=decoder_attention_mask, + encoder_hidden_states=encoder_outputs[0], + encoder_attention_mask=attention_mask, + head_mask=decoder_head_mask, + cross_attn_head_mask=cross_attn_head_mask, + past_key_values=past_key_values, + inputs_embeds=decoder_inputs_embeds, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + if not return_dict: + return decoder_outputs + encoder_outputs + + return Seq2SeqModelOutput( + last_hidden_state=decoder_outputs.last_hidden_state, + past_key_values=decoder_outputs.past_key_values, + decoder_hidden_states=decoder_outputs.hidden_states, + decoder_attentions=decoder_outputs.attentions, + cross_attentions=decoder_outputs.cross_attentions, + encoder_last_hidden_state=encoder_outputs.last_hidden_state, + encoder_hidden_states=encoder_outputs.hidden_states, + encoder_attentions=encoder_outputs.attentions, + ) + + +@add_start_docstrings( + "The Marian Model with a language modeling head. 
Can be used for summarization.", MARIAN_START_DOCSTRING +) +class MarianMTModel(MarianPreTrainedModel): + base_model_prefix = "model" + _keys_to_ignore_on_load_missing = [ + "final_logits_bias", + "encoder.embed_positions.weight", + "decoder.embed_positions.weight", + ] + _keys_to_ignore_on_save = ["model.encoder.embed_positions.weight", "model.decoder.embed_positions.weight"] + _tied_weights_keys = ["model.encoder.embed_tokens.weight", "model.decoder.embed_tokens.weight", "lm_head.weight"] + + def __init__(self, config: MarianConfig): + super().__init__(config) + self.model = MarianModel(config) + + target_vocab_size = config.vocab_size if config.share_encoder_decoder_embeddings else config.decoder_vocab_size + self.register_buffer("final_logits_bias", torch.zeros((1, target_vocab_size))) + self.lm_head = nn.Linear(config.d_model, target_vocab_size, bias=False) + + # Initialize weights and apply final processing + self.post_init() + + def get_encoder(self): + return self.model.get_encoder() + + def get_decoder(self): + return self.model.get_decoder() + + def resize_token_embeddings(self, new_num_tokens: int, pad_to_multiple_of: Optional[int] = None) -> nn.Embedding: + new_embeddings = super().resize_token_embeddings(new_num_tokens, pad_to_multiple_of) + if self.config.share_encoder_decoder_embeddings: + self._resize_final_logits_bias(new_num_tokens) + return new_embeddings + + def _resize_token_embeddings(self, new_num_tokens: int, pad_to_multiple_of=None) -> nn.Embedding: + old_embeddings = self.get_input_embeddings() + new_embeddings = self._get_resized_embeddings(old_embeddings, new_num_tokens, pad_to_multiple_of) + self.set_input_embeddings(new_embeddings) + + new_num_tokens = new_embeddings.weight.shape[0] + # update config.decoder_vocab_size if embeddings are tied + if self.config.share_encoder_decoder_embeddings: + self.config.decoder_vocab_size = new_num_tokens + + # if word embeddings are not tied, make sure that lm head is resized as well + if ( + self.config.share_encoder_decoder_embeddings + and self.get_output_embeddings() is not None + and not self.config.tie_word_embeddings + ): + old_lm_head = self.get_output_embeddings() + new_lm_head = self._get_resized_lm_head(old_lm_head, new_num_tokens) + self.set_output_embeddings(new_lm_head) + + return self.get_input_embeddings() + + def resize_decoder_token_embeddings(self, new_num_tokens): + if self.config.share_encoder_decoder_embeddings: + raise ValueError( + "`resize_decoder_token_embeddings` should not be called if `config.share_encoder_decoder_embeddings` " + "is `True`. Please use `resize_token_embeddings` instead." 
+ ) + + old_embeddings = self.model.get_decoder_input_embeddings() + new_embeddings = self._get_resized_embeddings(old_embeddings, new_num_tokens) + self.model.set_decoder_input_embeddings(new_embeddings) + + # if word embeddings are not tied, make sure that lm head is resized as well + if self.get_output_embeddings() is not None and not self.config.tie_word_embeddings: + old_lm_head = self.get_output_embeddings() + new_lm_head = self._get_resized_lm_head(old_lm_head, new_num_tokens) + self.set_output_embeddings(new_lm_head) + + model_embeds = self.model.get_decoder_input_embeddings() + + if new_num_tokens is None: + return model_embeds + + # Update base model and current model config + self.config.decoder_vocab_size = new_num_tokens + + # Tie weights again if needed + self.tie_weights() + + self._resize_final_logits_bias(new_num_tokens) + + return model_embeds + + def _resize_final_logits_bias(self, new_num_tokens: int) -> None: + old_num_tokens = self.final_logits_bias.shape[-1] + if new_num_tokens <= old_num_tokens: + new_bias = self.final_logits_bias[:, :new_num_tokens] + else: + extra_bias = torch.zeros((1, new_num_tokens - old_num_tokens), device=self.final_logits_bias.device) + new_bias = torch.cat([self.final_logits_bias, extra_bias], dim=1) + self.register_buffer("final_logits_bias", new_bias) + + def get_output_embeddings(self): + return self.lm_head + + def set_output_embeddings(self, new_embeddings: nn.Embedding): + self.lm_head = new_embeddings + + def tie_weights(self): + """ + Tie the weights between the input embeddings and the output embeddings. + + If the `torchscript` flag is set in the configuration, can't handle parameter sharing so we are cloning the + weights instead. + """ + output_embeddings = self.get_output_embeddings() + if output_embeddings is not None and getattr(self.config, "tie_word_embeddings", True): + # if embeddings are shared this will return shared embeddings otherwise decoder embed_tokens + word_embeddings = self.get_decoder().get_input_embeddings() + self._tie_or_clone_weights(output_embeddings, word_embeddings) + + if getattr(self.config, "is_encoder_decoder", False) and getattr(self.config, "tie_encoder_decoder", False): + if hasattr(self, self.base_model_prefix): + self = getattr(self, self.base_model_prefix) + tied_weights = self._tie_encoder_decoder_weights( + self.encoder, self.decoder, self.base_model_prefix, "encoder" + ) + # Setting a dynamic variable instead of `_tied_weights_keys` because it's a class + # attributed not an instance member, therefore modifying it will modify the entire class + # Leading to issues on subsequent calls by different tests or subsequent calls. 
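+            # Note (editorial comment, not part of the upstream file): `_tied_weights_keys`
+            # is a class-level attribute shared by every instance, so mutating it would leak
+            # state across models; the computed keys are therefore stored on this instance
+            # as `_dynamic_tied_weights_keys` instead.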
+ self._dynamic_tied_weights_keys = tied_weights + + for module in self.modules(): + if hasattr(module, "_tie_weights"): + module._tie_weights() + + @add_start_docstrings_to_model_forward(MARIAN_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC) + @add_end_docstrings(MARIAN_GENERATION_EXAMPLE) + def forward( + self, + input_ids: torch.LongTensor = None, + attention_mask: Optional[torch.Tensor] = None, + decoder_input_ids: Optional[torch.LongTensor] = None, + decoder_attention_mask: Optional[torch.Tensor] = None, + head_mask: Optional[torch.Tensor] = None, + decoder_head_mask: Optional[torch.Tensor] = None, + cross_attn_head_mask: Optional[torch.Tensor] = None, + encoder_outputs: Optional[Union[Tuple[torch.Tensor], BaseModelOutput]] = None, + past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + decoder_inputs_embeds: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Seq2SeqLMOutput: + r""" + labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., + config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored + (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. + + Returns: + + """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if labels is not None: + if use_cache: + logger.warning("The `use_cache` argument is changed to `False` since `labels` is provided.") + use_cache = False + if decoder_input_ids is None and decoder_inputs_embeds is None: + decoder_input_ids = shift_tokens_right( + labels, self.config.pad_token_id, self.config.decoder_start_token_id + ) + + outputs = self.model( + input_ids, + attention_mask=attention_mask, + decoder_input_ids=decoder_input_ids, + encoder_outputs=encoder_outputs, + decoder_attention_mask=decoder_attention_mask, + head_mask=head_mask, + decoder_head_mask=decoder_head_mask, + cross_attn_head_mask=cross_attn_head_mask, + past_key_values=past_key_values, + inputs_embeds=inputs_embeds, + decoder_inputs_embeds=decoder_inputs_embeds, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + lm_logits = self.lm_head(outputs[0]) + self.final_logits_bias + + masked_lm_loss = None + if labels is not None: + loss_fct = CrossEntropyLoss() + masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.decoder_vocab_size), labels.view(-1)) + + if not return_dict: + output = (lm_logits,) + outputs[1:] + return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output + + return Seq2SeqLMOutput( + loss=masked_lm_loss, + logits=lm_logits, + past_key_values=outputs.past_key_values, + decoder_hidden_states=outputs.decoder_hidden_states, + decoder_attentions=outputs.decoder_attentions, + cross_attentions=outputs.cross_attentions, + encoder_last_hidden_state=outputs.encoder_last_hidden_state, + encoder_hidden_states=outputs.encoder_hidden_states, + encoder_attentions=outputs.encoder_attentions, + ) + + def prepare_inputs_for_generation( + self, + decoder_input_ids: torch.LongTensor, + past_key_values: 
Optional[Tuple[Tuple[torch.FloatTensor]]] = None, + attention_mask: Optional[torch.Tensor] = None, + head_mask: Optional[torch.Tensor] = None, + decoder_head_mask: Optional[torch.Tensor] = None, + cross_attn_head_mask: Optional[torch.Tensor] = None, + use_cache: Optional[bool] = None, + encoder_outputs: Optional[Union[Tuple[torch.Tensor], BaseModelOutput]] = None, + **kwargs, + ) -> Dict: + # cut decoder_input_ids if past is used + if past_key_values is not None: + past_length = past_key_values[0][0].shape[2] + + # Some generation methods already pass only the last input ID + if decoder_input_ids.shape[1] > past_length: + remove_prefix_length = past_length + else: + # Default to old behavior: keep only final ID + remove_prefix_length = decoder_input_ids.shape[1] - 1 + + decoder_input_ids = decoder_input_ids[:, remove_prefix_length:] + + return { + "input_ids": None, # encoder_outputs is defined. input_ids not needed + "encoder_outputs": encoder_outputs, + "past_key_values": past_key_values, + "decoder_input_ids": decoder_input_ids, + "attention_mask": attention_mask, + "head_mask": head_mask, + "decoder_head_mask": decoder_head_mask, + "cross_attn_head_mask": cross_attn_head_mask, + "use_cache": use_cache, # change this to avoid caching (presumably for debugging) + } + + def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor): + return shift_tokens_right(labels, self.config.pad_token_id, self.config.decoder_start_token_id) + + @staticmethod + def _reorder_cache(past_key_values, beam_idx): + reordered_past = () + for layer_past in past_key_values: + # cached cross_attention states don't have to be reordered -> they are always the same + reordered_past += ( + tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past[:2]) + + layer_past[2:], + ) + return reordered_past + + +# Copied from transformers.models.bart.modeling_bart.BartDecoderWrapper with Bart->Marian +class MarianDecoderWrapper(MarianPreTrainedModel): + """ + This wrapper class is a helper class to correctly load pretrained checkpoints when the causal language model is + used in combination with the [`EncoderDecoderModel`] framework. 
+ """ + + def __init__(self, config): + super().__init__(config) + self.decoder = MarianDecoder(config) + + def forward(self, *args, **kwargs): + return self.decoder(*args, **kwargs) + + +# Copied from transformers.models.bart.modeling_bart.BartForCausalLM with Bart->Marian, facebook/bart-base->Helsinki-NLP/opus-mt-fr-en +class MarianForCausalLM(MarianPreTrainedModel): + _tied_weights_keys = ["lm_head.weight"] + + def __init__(self, config): + config = copy.deepcopy(config) + config.is_decoder = True + config.is_encoder_decoder = False + super().__init__(config) + self.model = MarianDecoderWrapper(config) + + self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) + + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.model.decoder.embed_tokens + + def set_input_embeddings(self, value): + self.model.decoder.embed_tokens = value + + def get_output_embeddings(self): + return self.lm_head + + def set_output_embeddings(self, new_embeddings): + self.lm_head = new_embeddings + + def set_decoder(self, decoder): + self.model.decoder = decoder + + def get_decoder(self): + return self.model.decoder + + @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC) + def forward( + self, + input_ids: torch.LongTensor = None, + attention_mask: Optional[torch.Tensor] = None, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + encoder_attention_mask: Optional[torch.FloatTensor] = None, + head_mask: Optional[torch.Tensor] = None, + cross_attn_head_mask: Optional[torch.Tensor] = None, + past_key_values: Optional[List[torch.FloatTensor]] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, CausalLMOutputWithCrossAttentions]: + r""" + Args: + input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): + Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you + provide it. + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input-ids) + attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention + if the model is configured as a decoder. + encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used + in the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: + head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): + Mask to nullify selected heads of the attention modules. 
Mask values selected in `[0, 1]`: + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + + cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): + Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`: + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + + past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): + Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of + shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of + shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. The two additional + tensors are only required when the model is used as a decoder in a Sequence to Sequence model. + + Contains pre-computed hidden-states (key and values in the self-attention blocks and in the + cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. + + If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those + that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of + all `decoder_input_ids` of shape `(batch_size, sequence_length)`. + labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., + config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored + (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. + use_cache (`bool`, *optional*): + If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding + (see `past_key_values`). + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors + for more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. + + Returns: + + Example: + + ```python + >>> from transformers import AutoTokenizer, MarianForCausalLM + + >>> tokenizer = AutoTokenizer.from_pretrained("Helsinki-NLP/opus-mt-fr-en") + >>> model = MarianForCausalLM.from_pretrained("Helsinki-NLP/opus-mt-fr-en", add_cross_attention=False) + >>> assert model.config.is_decoder, f"{model.__class__} has to be configured as a decoder." 
+ >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") + >>> outputs = model(**inputs) + + >>> logits = outputs.logits + >>> expected_shape = [1, inputs.input_ids.shape[-1], model.config.vocab_size] + >>> list(logits.shape) == expected_shape + True + ```""" + + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) + outputs = self.model.decoder( + input_ids=input_ids, + attention_mask=attention_mask, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_attention_mask, + head_mask=head_mask, + cross_attn_head_mask=cross_attn_head_mask, + past_key_values=past_key_values, + inputs_embeds=inputs_embeds, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + logits = self.lm_head(outputs[0]) + + loss = None + if labels is not None: + labels = labels.to(logits.device) + loss_fct = CrossEntropyLoss() + loss = loss_fct(logits.view(-1, self.config.vocab_size), labels.view(-1)) + + if not return_dict: + output = (logits,) + outputs[1:] + return (loss,) + output if loss is not None else output + + return CausalLMOutputWithCrossAttentions( + loss=loss, + logits=logits, + past_key_values=outputs.past_key_values, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + cross_attentions=outputs.cross_attentions, + ) + + def prepare_inputs_for_generation( + self, input_ids, past_key_values=None, attention_mask=None, use_cache=None, **kwargs + ): + # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly + if attention_mask is None: + attention_mask = input_ids.new_ones(input_ids.shape) + + if past_key_values: + past_length = past_key_values[0][0].shape[2] + + # Some generation methods already pass only the last input ID + if input_ids.shape[1] > past_length: + remove_prefix_length = past_length + else: + # Default to old behavior: keep only final ID + remove_prefix_length = input_ids.shape[1] - 1 + + input_ids = input_ids[:, remove_prefix_length:] + # first step, decoder_cached_states are empty + return { + "input_ids": input_ids, # encoder_outputs is defined. input_ids not needed + "attention_mask": attention_mask, + "past_key_values": past_key_values, + "use_cache": use_cache, + } + + @staticmethod + def _reorder_cache(past_key_values, beam_idx): + reordered_past = () + for layer_past in past_key_values: + reordered_past += ( + tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past), + ) + return reordered_past diff --git a/venv/lib/python3.10/site-packages/transformers/models/marian/modeling_tf_marian.py b/venv/lib/python3.10/site-packages/transformers/models/marian/modeling_tf_marian.py new file mode 100644 index 0000000000000000000000000000000000000000..c6d5355f70c5a12a60ffd731b210cdce820bbf9d --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/marian/modeling_tf_marian.py @@ -0,0 +1,1557 @@ +# coding=utf-8 +# Copyright 2021 The Marian Team Authors and The HuggingFace Inc. team. All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" TF 2.0 Marian model.""" + + +from __future__ import annotations + +import random +from typing import Optional, Tuple, Union + +import numpy as np +import tensorflow as tf + +from ...activations_tf import get_tf_activation +from ...modeling_tf_outputs import ( + TFBaseModelOutput, + TFBaseModelOutputWithPastAndCrossAttentions, + TFSeq2SeqLMOutput, + TFSeq2SeqModelOutput, +) + +# Public API +from ...modeling_tf_utils import ( + TFCausalLanguageModelingLoss, + TFPreTrainedModel, + keras, + keras_serializable, + unpack_inputs, +) +from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax +from ...utils import ( + add_code_sample_docstrings, + add_end_docstrings, + add_start_docstrings, + add_start_docstrings_to_model_forward, + logging, + replace_return_docstrings, +) +from .configuration_marian import MarianConfig + + +logger = logging.get_logger(__name__) + +_CHECKPOINT_FOR_DOC = "Helsinki-NLP/opus-mt-en-de" +_CONFIG_FOR_DOC = "MarianConfig" + + +LARGE_NEGATIVE = -1e8 + + +# Copied from transformers.models.bart.modeling_tf_bart.shift_tokens_right +def shift_tokens_right(input_ids: tf.Tensor, pad_token_id: int, decoder_start_token_id: int): + pad_token_id = tf.cast(pad_token_id, input_ids.dtype) + decoder_start_token_id = tf.cast(decoder_start_token_id, input_ids.dtype) + start_tokens = tf.fill( + (shape_list(input_ids)[0], 1), tf.convert_to_tensor(decoder_start_token_id, input_ids.dtype) + ) + shifted_input_ids = tf.concat([start_tokens, input_ids[:, :-1]], -1) + # replace possible -100 values in labels by `pad_token_id` + shifted_input_ids = tf.where( + shifted_input_ids == -100, + tf.fill(shape_list(shifted_input_ids), tf.convert_to_tensor(pad_token_id, input_ids.dtype)), + shifted_input_ids, + ) + + # "Verify that `labels` has only positive values and -100" + assert_gte0 = tf.debugging.assert_greater_equal(shifted_input_ids, tf.constant(0, dtype=input_ids.dtype)) + + # Make sure the assertion op is called by wrapping the result in an identity no-op + with tf.control_dependencies([assert_gte0]): + shifted_input_ids = tf.identity(shifted_input_ids) + + return shifted_input_ids + + +# Copied from transformers.models.bart.modeling_tf_bart._make_causal_mask +def _make_causal_mask(input_ids_shape: tf.TensorShape, past_key_values_length: int = 0): + """ + Make causal mask used for bi-directional self-attention. 
+ """ + bsz = input_ids_shape[0] + tgt_len = input_ids_shape[1] + mask = tf.ones((tgt_len, tgt_len)) * LARGE_NEGATIVE + mask_cond = tf.range(shape_list(mask)[-1]) + + mask = tf.where(mask_cond < tf.reshape(mask_cond + 1, (shape_list(mask)[-1], 1)), 0.0, mask) + + if past_key_values_length > 0: + mask = tf.concat([tf.zeros((tgt_len, past_key_values_length)), mask], axis=-1) + + return tf.tile(mask[None, None, :, :], (bsz, 1, 1, 1)) + + +# Copied from transformers.models.bart.modeling_tf_bart._expand_mask +def _expand_mask(mask: tf.Tensor, tgt_len: Optional[int] = None): + """ + Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`. + """ + src_len = shape_list(mask)[1] + tgt_len = tgt_len if tgt_len is not None else src_len + one_cst = tf.constant(1.0) + mask = tf.cast(mask, dtype=one_cst.dtype) + expanded_mask = tf.tile(mask[:, None, None, :], (1, 1, tgt_len, 1)) + + return (one_cst - expanded_mask) * LARGE_NEGATIVE + + +class TFMarianSinusoidalPositionalEmbedding(keras.layers.Layer): + """This module produces sinusoidal positional embeddings of any length.""" + + def __init__(self, num_positions: int, embedding_dim: int, **kwargs): + super().__init__(**kwargs) + + if embedding_dim % 2 != 0: + raise NotImplementedError(f"odd embedding_dim {embedding_dim} not supported") + + self.embedding_dim = embedding_dim + self.num_positions = num_positions + + def build(self, input_shape: tf.TensorShape): + """ + Build shared token embedding layer Shared weights logic adapted from + https://github.com/tensorflow/models/blob/a009f4fb9d2fc4949e32192a944688925ef78659/official/transformer/v2/embedding_layer.py#L24 + """ + + weight = self._init_weight(self.num_positions, self.embedding_dim) + + self.weight = self.add_weight( + name="embeddings", + shape=[self.num_positions, self.embedding_dim], + ) + weight = tf.cast(weight, dtype=self.weight.dtype) + + self.weight.assign(weight) + + super().build(input_shape) + + @staticmethod + def _init_weight(n_pos: int, dim: int): + """ + Identical to the XLM create_sinusoidal_embeddings except features are not interleaved. The cos features are in + the 2nd half of the vector. 
[dim // 2:] + """ + position_enc = np.array( + [[pos / np.power(10000, 2 * (j // 2) / dim) for j in range(dim)] for pos in range(n_pos)] + ) + table = np.zeros_like(position_enc) + # index 0 is all zero + table[:, 0 : dim // 2] = np.sin(position_enc[:, 0::2]) + table[:, dim // 2 :] = np.cos(position_enc[:, 1::2]) + # convert to tensor + table = tf.convert_to_tensor(table) + tf.stop_gradient(table) + return table + + def call( + self, input_shape: tf.TensorShape, past_key_values_length: int = 0, position_ids: tf.Tensor | None = None + ): + """Input is expected to be of size [bsz x seqlen].""" + if position_ids is None: + seq_len = input_shape[1] + position_ids = tf.range(past_key_values_length, seq_len + past_key_values_length, delta=1, name="range") + return tf.gather(self.weight, position_ids) + + +# Copied from transformers.models.bart.modeling_tf_bart.TFBartAttention with Bart->Marian +class TFMarianAttention(keras.layers.Layer): + """Multi-headed attention from "Attention Is All You Need""" + + def __init__( + self, + embed_dim: int, + num_heads: int, + dropout: float = 0.0, + is_decoder: bool = False, + bias: bool = True, + **kwargs, + ): + super().__init__(**kwargs) + self.embed_dim = embed_dim + + self.num_heads = num_heads + self.dropout = keras.layers.Dropout(dropout) + self.head_dim = embed_dim // num_heads + if (self.head_dim * num_heads) != self.embed_dim: + raise ValueError( + f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}" + f" and `num_heads`: {num_heads})." + ) + self.scaling = self.head_dim**-0.5 + self.is_decoder = is_decoder + + self.k_proj = keras.layers.Dense(embed_dim, use_bias=bias, name="k_proj") + self.q_proj = keras.layers.Dense(embed_dim, use_bias=bias, name="q_proj") + self.v_proj = keras.layers.Dense(embed_dim, use_bias=bias, name="v_proj") + self.out_proj = keras.layers.Dense(embed_dim, use_bias=bias, name="out_proj") + + def _shape(self, tensor: tf.Tensor, seq_len: int, bsz: int): + return tf.transpose(tf.reshape(tensor, (bsz, seq_len, self.num_heads, self.head_dim)), (0, 2, 1, 3)) + + def call( + self, + hidden_states: tf.Tensor, + key_value_states: tf.Tensor | None = None, + past_key_value: Tuple[Tuple[tf.Tensor]] | None = None, + attention_mask: tf.Tensor | None = None, + layer_head_mask: tf.Tensor | None = None, + training: Optional[bool] = False, + ) -> Tuple[tf.Tensor, tf.Tensor | None]: + """Input shape: Batch x Time x Channel""" + + # if key_value_states are provided this layer is used as a cross-attention layer + # for the decoder + is_cross_attention = key_value_states is not None + bsz, tgt_len, embed_dim = shape_list(hidden_states) + + # get query proj + query_states = self.q_proj(hidden_states) * self.scaling + # get key, value proj + if is_cross_attention and past_key_value is not None: + # reuse k,v, cross_attentions + key_states = past_key_value[0] + value_states = past_key_value[1] + elif is_cross_attention: + # cross_attentions + key_states = self._shape(self.k_proj(key_value_states), -1, bsz) + value_states = self._shape(self.v_proj(key_value_states), -1, bsz) + elif past_key_value is not None: + # reuse k, v, self_attention + key_states = self._shape(self.k_proj(hidden_states), -1, bsz) + value_states = self._shape(self.v_proj(hidden_states), -1, bsz) + key_states = tf.concat([past_key_value[0], key_states], axis=2) + value_states = tf.concat([past_key_value[1], value_states], axis=2) + else: + # self_attention + key_states = self._shape(self.k_proj(hidden_states), -1, bsz) + value_states = 
self._shape(self.v_proj(hidden_states), -1, bsz) + + if self.is_decoder: + # if cross_attention save Tuple(tf.Tensor, tf.Tensor) of all cross attention key/value_states. + # Further calls to cross_attention layer can then reuse all cross-attention + # key/value_states (first "if" case) + # if uni-directional self-attention (decoder) save Tuple(tf.Tensor, tf.Tensor) of + # all previous decoder key/value_states. Further calls to uni-directional self-attention + # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) + # if encoder bi-directional self-attention `past_key_value` is always `None` + past_key_value = (key_states, value_states) + + proj_shape = (bsz * self.num_heads, -1, self.head_dim) + query_states = tf.reshape(self._shape(query_states, tgt_len, bsz), proj_shape) + key_states = tf.reshape(key_states, proj_shape) + value_states = tf.reshape(value_states, proj_shape) + + src_len = shape_list(key_states)[1] + attn_weights = tf.matmul(query_states, key_states, transpose_b=True) + + tf.debugging.assert_equal( + shape_list(attn_weights), + [bsz * self.num_heads, tgt_len, src_len], + message=( + f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is" + f" {shape_list(attn_weights)}" + ), + ) + + if attention_mask is not None: + tf.debugging.assert_equal( + shape_list(attention_mask), + [bsz, 1, tgt_len, src_len], + message=( + f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is" + f" {shape_list(attention_mask)}" + ), + ) + + attention_mask = tf.cast(attention_mask, dtype=attn_weights.dtype) + attn_weights = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len)) + attention_mask + attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len)) + + attn_weights = stable_softmax(attn_weights, axis=-1) + + if layer_head_mask is not None: + tf.debugging.assert_equal( + shape_list(layer_head_mask), + [self.num_heads], + message=( + f"Head mask for a single layer should be of size {(self.num_heads)}, but is" + f" {shape_list(layer_head_mask)}" + ), + ) + + attn_weights = tf.reshape(layer_head_mask, (1, -1, 1, 1)) * tf.reshape( + attn_weights, (bsz, self.num_heads, tgt_len, src_len) + ) + attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len)) + + attn_probs = self.dropout(attn_weights, training=training) + attn_output = tf.matmul(attn_probs, value_states) + + tf.debugging.assert_equal( + shape_list(attn_output), + [bsz * self.num_heads, tgt_len, self.head_dim], + message=( + f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is" + f" {shape_list(attn_output)}" + ), + ) + + attn_output = tf.transpose( + tf.reshape(attn_output, (bsz, self.num_heads, tgt_len, self.head_dim)), (0, 2, 1, 3) + ) + attn_output = tf.reshape(attn_output, (bsz, tgt_len, embed_dim)) + + attn_output = self.out_proj(attn_output) + attn_weights: tf.Tensor = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len)) + + return attn_output, attn_weights, past_key_value + + def build(self, input_shape=None): + if self.built: + return + self.built = True + if getattr(self, "k_proj", None) is not None: + with tf.name_scope(self.k_proj.name): + self.k_proj.build([None, None, self.embed_dim]) + if getattr(self, "q_proj", None) is not None: + with tf.name_scope(self.q_proj.name): + self.q_proj.build([None, None, self.embed_dim]) + if getattr(self, "v_proj", None) is not None: + with tf.name_scope(self.v_proj.name): + 
self.v_proj.build([None, None, self.embed_dim]) + if getattr(self, "out_proj", None) is not None: + with tf.name_scope(self.out_proj.name): + self.out_proj.build([None, None, self.embed_dim]) + + +# Copied from transformers.models.bart.modeling_tf_bart.TFBartEncoderLayer with Bart->Marian +class TFMarianEncoderLayer(keras.layers.Layer): + def __init__(self, config: MarianConfig, **kwargs): + super().__init__(**kwargs) + self.embed_dim = config.d_model + self.self_attn = TFMarianAttention( + self.embed_dim, config.encoder_attention_heads, dropout=config.attention_dropout, name="self_attn" + ) + self.self_attn_layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="self_attn_layer_norm") + self.dropout = keras.layers.Dropout(config.dropout) + self.activation_fn = get_tf_activation(config.activation_function) + self.activation_dropout = keras.layers.Dropout(config.activation_dropout) + self.fc1 = keras.layers.Dense(config.encoder_ffn_dim, name="fc1") + self.fc2 = keras.layers.Dense(self.embed_dim, name="fc2") + self.final_layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="final_layer_norm") + self.config = config + + def call( + self, + hidden_states: tf.Tensor, + attention_mask: np.ndarray | tf.Tensor | None, + layer_head_mask: tf.Tensor | None, + training: Optional[bool] = False, + ) -> tf.Tensor: + """ + Args: + hidden_states (`tf.Tensor`): input to the layer of shape `(batch, seq_len, embed_dim)` + attention_mask (`tf.Tensor`): attention mask of size + `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. + layer_head_mask (`tf.Tensor`): mask for attention heads in a given layer of size + `(encoder_attention_heads,)` + """ + residual = hidden_states + hidden_states, self_attn_weights, _ = self.self_attn( + hidden_states=hidden_states, attention_mask=attention_mask, layer_head_mask=layer_head_mask + ) + + tf.debugging.assert_equal( + shape_list(hidden_states), + shape_list(residual), + message=f"Self attn modified the shape of query {shape_list(residual)} to {shape_list(hidden_states)}", + ) + + hidden_states = self.dropout(hidden_states, training=training) + hidden_states = residual + hidden_states + hidden_states = self.self_attn_layer_norm(hidden_states) + + residual = hidden_states + hidden_states = self.activation_fn(self.fc1(hidden_states)) + hidden_states = self.activation_dropout(hidden_states, training=training) + hidden_states = self.fc2(hidden_states) + hidden_states = self.dropout(hidden_states, training=training) + hidden_states = residual + hidden_states + hidden_states = self.final_layer_norm(hidden_states) + + return hidden_states, self_attn_weights + + def build(self, input_shape=None): + if self.built: + return + self.built = True + if getattr(self, "self_attn", None) is not None: + with tf.name_scope(self.self_attn.name): + self.self_attn.build(None) + if getattr(self, "self_attn_layer_norm", None) is not None: + with tf.name_scope(self.self_attn_layer_norm.name): + self.self_attn_layer_norm.build([None, None, self.embed_dim]) + if getattr(self, "fc1", None) is not None: + with tf.name_scope(self.fc1.name): + self.fc1.build([None, None, self.embed_dim]) + if getattr(self, "fc2", None) is not None: + with tf.name_scope(self.fc2.name): + self.fc2.build([None, None, self.config.encoder_ffn_dim]) + if getattr(self, "final_layer_norm", None) is not None: + with tf.name_scope(self.final_layer_norm.name): + self.final_layer_norm.build([None, None, self.embed_dim]) + + +# Copied from 
transformers.models.bart.modeling_tf_bart.TFBartDecoderLayer with Bart->Marian +class TFMarianDecoderLayer(keras.layers.Layer): + def __init__(self, config: MarianConfig, **kwargs): + super().__init__(**kwargs) + self.embed_dim = config.d_model + self.self_attn = TFMarianAttention( + embed_dim=self.embed_dim, + num_heads=config.decoder_attention_heads, + dropout=config.attention_dropout, + name="self_attn", + is_decoder=True, + ) + self.dropout = keras.layers.Dropout(config.dropout) + self.activation_fn = get_tf_activation(config.activation_function) + self.activation_dropout = keras.layers.Dropout(config.activation_dropout) + + self.self_attn_layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="self_attn_layer_norm") + self.encoder_attn = TFMarianAttention( + self.embed_dim, + config.decoder_attention_heads, + dropout=config.attention_dropout, + name="encoder_attn", + is_decoder=True, + ) + self.encoder_attn_layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="encoder_attn_layer_norm") + self.fc1 = keras.layers.Dense(config.decoder_ffn_dim, name="fc1") + self.fc2 = keras.layers.Dense(self.embed_dim, name="fc2") + self.final_layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="final_layer_norm") + self.config = config + + def call( + self, + hidden_states: tf.Tensor, + attention_mask: np.ndarray | tf.Tensor | None = None, + encoder_hidden_states: np.ndarray | tf.Tensor | None = None, + encoder_attention_mask: np.ndarray | tf.Tensor | None = None, + layer_head_mask: tf.Tensor | None = None, + cross_attn_layer_head_mask: tf.Tensor | None = None, + past_key_value: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None, + training: Optional[bool] = False, + ) -> Tuple[tf.Tensor, tf.Tensor, Tuple[Tuple[tf.Tensor]]]: + """ + Args: + hidden_states (`tf.Tensor`): input to the layer of shape `(batch, seq_len, embed_dim)` + attention_mask (`tf.Tensor`): attention mask of size + `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. + encoder_hidden_states (`tf.Tensor`): + cross attention input to the layer of shape `(batch, seq_len, embed_dim)` + encoder_attention_mask (`tf.Tensor`): encoder attention mask of size + `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. + layer_head_mask (`tf.Tensor`): mask for attention heads in a given layer of size + `(decoder_attention_heads,)` + cross_attn_layer_head_mask (`tf.Tensor`): mask for heads of the cross-attention module. 
+ `(decoder_attention_heads,)` + past_key_value (`Tuple(tf.Tensor)`): cached past key and value projection states + """ + residual = hidden_states + + # Self Attention + # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 + self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None + # add present self-attn cache to positions 1,2 of present_key_value tuple + hidden_states, self_attn_weights, present_key_value = self.self_attn( + hidden_states=hidden_states, + past_key_value=self_attn_past_key_value, + attention_mask=attention_mask, + layer_head_mask=layer_head_mask, + ) + hidden_states = self.dropout(hidden_states, training=training) + hidden_states = residual + hidden_states + hidden_states = self.self_attn_layer_norm(hidden_states) + + # Cross-Attention Block + cross_attn_present_key_value = None + cross_attn_weights = None + if encoder_hidden_states is not None: + residual = hidden_states + + # cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple + cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None + hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn( + hidden_states=hidden_states, + key_value_states=encoder_hidden_states, + attention_mask=encoder_attention_mask, + layer_head_mask=cross_attn_layer_head_mask, + past_key_value=cross_attn_past_key_value, + ) + hidden_states = self.dropout(hidden_states, training=training) + hidden_states = residual + hidden_states + hidden_states = self.encoder_attn_layer_norm(hidden_states) + + # add cross-attn to positions 3,4 of present_key_value tuple + present_key_value = present_key_value + cross_attn_present_key_value + + # Fully Connected + residual = hidden_states + hidden_states = self.activation_fn(self.fc1(hidden_states)) + hidden_states = self.activation_dropout(hidden_states, training=training) + hidden_states = self.fc2(hidden_states) + hidden_states = self.dropout(hidden_states, training=training) + hidden_states = residual + hidden_states + hidden_states = self.final_layer_norm(hidden_states) + + return ( + hidden_states, + self_attn_weights, + cross_attn_weights, + present_key_value, + ) + + def build(self, input_shape=None): + if self.built: + return + self.built = True + if getattr(self, "self_attn", None) is not None: + with tf.name_scope(self.self_attn.name): + self.self_attn.build(None) + if getattr(self, "self_attn_layer_norm", None) is not None: + with tf.name_scope(self.self_attn_layer_norm.name): + self.self_attn_layer_norm.build([None, None, self.embed_dim]) + if getattr(self, "encoder_attn", None) is not None: + with tf.name_scope(self.encoder_attn.name): + self.encoder_attn.build(None) + if getattr(self, "encoder_attn_layer_norm", None) is not None: + with tf.name_scope(self.encoder_attn_layer_norm.name): + self.encoder_attn_layer_norm.build([None, None, self.embed_dim]) + if getattr(self, "fc1", None) is not None: + with tf.name_scope(self.fc1.name): + self.fc1.build([None, None, self.embed_dim]) + if getattr(self, "fc2", None) is not None: + with tf.name_scope(self.fc2.name): + self.fc2.build([None, None, self.config.decoder_ffn_dim]) + if getattr(self, "final_layer_norm", None) is not None: + with tf.name_scope(self.final_layer_norm.name): + self.final_layer_norm.build([None, None, self.embed_dim]) + + +class TFMarianPreTrainedModel(TFPreTrainedModel): + config_class = MarianConfig + base_model_prefix = "model" + + +MARIAN_START_DOCSTRING = r""" + This model inherits 
from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the + library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads + etc.) + + This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it + as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and + behavior. + + + + TensorFlow models and layers in `transformers` accept two formats as input: + + - having all inputs as keyword arguments (like PyTorch models), or + - having all inputs as a list, tuple or dict in the first positional argument. + + The reason the second format is supported is that Keras methods prefer this format when passing inputs to models + and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just + pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second + format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with + the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first + positional argument: + + - a single Tensor with `input_ids` only and nothing else: `model(input_ids)` + - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: + `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])` + - a dictionary with one or several input Tensors associated to the input names given in the docstring: + `model({"input_ids": input_ids, "token_type_ids": token_type_ids})` + + Note that when creating models and layers with + [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry + about any of this, as you can just pass inputs like you would to any other Python function! + + + + Args: + config ([`MarianConfig`]): Model configuration class with all the parameters of the model. + Initializing with a config file does not load the weights associated with the model, only the + configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights. +""" + +MARIAN_GENERATION_EXAMPLE = r""" + TF version of marian-nmt's transformer.h (c++). Designed for the OPUS-NMT translation checkpoints. Available + models are listed [here](https://huggingface.co/models?search=Helsinki-NLP). + + Examples: + + ```python + >>> from transformers import AutoTokenizer, TFMarianMTModel + >>> from typing import List + + >>> src = "fr" # source language + >>> trg = "en" # target language + >>> sample_text = "où est l'arrêt de bus ?" + >>> model_name = f"Helsinki-NLP/opus-mt-{src}-{trg}" + + >>> model = TFMarianMTModel.from_pretrained(model_name) + >>> tokenizer = AutoTokenizer.from_pretrained(model_name) + >>> batch = tokenizer([sample_text], return_tensors="tf") + >>> gen = model.generate(**batch) + >>> tokenizer.batch_decode(gen, skip_special_tokens=True) + "Where is the bus stop ?" + ``` +""" + +MARIAN_INPUTS_DOCSTRING = r""" + Args: + input_ids (`tf.Tensor` of shape `({0})`): + Indices of input sequence tokens in the vocabulary. + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. 
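# A short sketch of the three input formats that MARIAN_START_DOCSTRING above
# describes for TF models: keyword arguments, a list in the first positional
# argument (in docstring order), or a dict keyed by input names. Assumes the
# "Helsinki-NLP/opus-mt-en-de" checkpoint; feeding the source ids back in as
# `decoder_input_ids` is only a placeholder to keep the forward pass runnable.
from transformers import AutoTokenizer, TFMarianMTModel

tokenizer = AutoTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-de")
model = TFMarianMTModel.from_pretrained("Helsinki-NLP/opus-mt-en-de")
batch = tokenizer(["Hello world"], return_tensors="tf")

# 1) keyword arguments, as with PyTorch models
out = model(input_ids=batch["input_ids"], attention_mask=batch["attention_mask"], decoder_input_ids=batch["input_ids"])
# 2) a list in the first positional argument: input_ids, attention_mask, decoder_input_ids, ...
out = model([batch["input_ids"], batch["attention_mask"], batch["input_ids"]])
# 3) a dict with one entry per input name
out = model({"input_ids": batch["input_ids"], "attention_mask": batch["attention_mask"], "decoder_input_ids": batch["input_ids"]})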
+ + [What are input IDs?](../glossary#input-ids) + attention_mask (`tf.Tensor` of shape `({0})`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + decoder_input_ids (`tf.Tensor` of shape `(batch_size, target_sequence_length)`, *optional*): + Indices of decoder input sequence tokens in the vocabulary. + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are decoder input IDs?](../glossary#decoder-input-ids) + + Marian uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If + `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see + `past_key_values`). + decoder_attention_mask (`tf.Tensor` of shape `(batch_size, target_sequence_length)`, *optional*): + will be made by default and ignore pad tokens. It is not recommended to set this for most use cases. + decoder_position_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the + range `[0, config.max_position_embeddings - 1]`. + head_mask (`tf.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*): + Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`: + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + + decoder_head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): + Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in `[0, 1]`: + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + + cross_attn_head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): + Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`: + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + + encoder_outputs (`tf.FloatTensor`, *optional*): + hidden states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. + of shape `(batch_size, sequence_length, hidden_size)` is a sequence of + past_key_values (`Tuple[Tuple[tf.Tensor]]` of length `config.n_layers`) + contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. + If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that + don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all + `decoder_input_ids` of shape `(batch_size, sequence_length)`. + inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. + This is useful if you want more control over how to convert `input_ids` indices into associated vectors + than the model's internal embedding lookup matrix. + use_cache (`bool`, *optional*, defaults to `True`): + If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see + `past_key_values`). 
Set to `False` during training, `True` during generation + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the + config will be used instead. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. This argument can be used only in eager mode, in graph mode the value in the config will be + used instead. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in + eager mode, in graph mode the value will always be set to True. + training (`bool`, *optional*, defaults to `False`): + Whether or not to use the model in training mode (some modules like dropout modules have different + behaviors between training and evaluation). +""" + + +@keras_serializable +class TFMarianEncoder(keras.layers.Layer): + config_class = MarianConfig + """ + Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a + [`TFMarianEncoderLayer`]. + + Args: + config: MarianConfig + """ + + def __init__(self, config: MarianConfig, embed_tokens: Optional[keras.layers.Embedding] = None, **kwargs): + super().__init__(**kwargs) + self.config = config + self.dropout = keras.layers.Dropout(config.dropout) + self.layerdrop = config.encoder_layerdrop + self.padding_idx = config.pad_token_id + self.max_source_positions = config.max_position_embeddings + self.embed_scale = tf.math.sqrt(float(config.d_model)) if config.scale_embedding else 1.0 + + self.embed_tokens = embed_tokens + self.embed_positions = TFMarianSinusoidalPositionalEmbedding( + config.max_position_embeddings, + config.d_model, + name="embed_positions", + ) + self.layers = [TFMarianEncoderLayer(config, name=f"layers.{i}") for i in range(config.encoder_layers)] + + def get_embed_tokens(self): + return self.embed_tokens + + def set_embed_tokens(self, embed_tokens): + self.embed_tokens = embed_tokens + + @unpack_inputs + def call( + self, + input_ids: tf.Tensor | None = None, + inputs_embeds: tf.Tensor | None = None, + attention_mask: tf.Tensor | None = None, + head_mask: tf.Tensor | None = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + training: bool = False, + ): + """ + Args: + input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`): + Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you + provide it. + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input-ids) + attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + head_mask (`tf.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, `optional): + Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. 
+ + inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. + This is useful if you want more control over how to convert `input_ids` indices into associated vectors + than the model's internal embedding lookup matrix. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value + in the config will be used instead. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors + for more detail. This argument can be used only in eager mode, in graph mode the value in the config + will be used instead. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used + in eager mode, in graph mode the value will always be set to True. + training (`bool`, *optional*, defaults to `False`): + Whether or not to use the model in training mode (some modules like dropout modules have different + behaviors between training and evaluation). + """ + + if input_ids is not None and inputs_embeds is not None: + raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") + elif input_ids is not None: + input_shape = shape_list(input_ids) + elif inputs_embeds is not None: + input_shape = shape_list(inputs_embeds)[:-1] + else: + raise ValueError("You have to specify either input_ids or inputs_embeds") + + if inputs_embeds is None: + check_embeddings_within_bounds(input_ids, self.embed_tokens.input_dim) + inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale + + embed_pos = self.embed_positions(input_shape) + hidden_states = inputs_embeds + embed_pos + hidden_states = self.dropout(hidden_states, training=training) + + # check attention mask and invert + if attention_mask is not None: + # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] + attention_mask = _expand_mask(attention_mask) + else: + attention_mask = None + + encoder_states = () if output_hidden_states else None + all_attentions = () if output_attentions else None + + # check if head_mask has a correct number of layers specified if desired + if head_mask is not None: + tf.debugging.assert_equal( + shape_list(head_mask)[0], + len(self.layers), + message=( + f"The head_mask should be specified for {len(self.layers)} layers, but it is for" + f" {shape_list(head_mask)[0]}." 
+ ), + ) + + # encoder layers + for idx, encoder_layer in enumerate(self.layers): + if output_hidden_states: + encoder_states = encoder_states + (hidden_states,) + # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) + dropout_probability = random.uniform(0, 1) + if training and (dropout_probability < self.layerdrop): # skip the layer + continue + + hidden_states, attn = encoder_layer( + hidden_states, + attention_mask, + head_mask[idx] if head_mask is not None else None, + ) + + if output_attentions: + all_attentions += (attn,) + + if output_hidden_states: + encoder_states = encoder_states + (hidden_states,) + + if not return_dict: + return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None) + return TFBaseModelOutput( + last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions + ) + + def build(self, input_shape=None): + if self.built: + return + self.built = True + if getattr(self, "embed_positions", None) is not None: + with tf.name_scope(self.embed_positions.name): + self.embed_positions.build(None) + if getattr(self, "layers", None) is not None: + for layer in self.layers: + with tf.name_scope(layer.name): + layer.build(None) + + +@keras_serializable +class TFMarianDecoder(keras.layers.Layer): + config_class = MarianConfig + """ + Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`TFMarianDecoderLayer`] + + Args: + config: MarianConfig + embed_tokens: output embedding + """ + + def __init__(self, config: MarianConfig, embed_tokens: Optional[keras.layers.Embedding] = None, **kwargs): + super().__init__(**kwargs) + self.config = config + self.padding_idx = config.pad_token_id + self.embed_tokens = embed_tokens + self.layerdrop = config.decoder_layerdrop + self.embed_positions = TFMarianSinusoidalPositionalEmbedding( + config.max_position_embeddings, + config.d_model, + name="embed_positions", + ) + self.embed_scale = tf.math.sqrt(float(config.d_model)) if config.scale_embedding else 1.0 + self.layers = [TFMarianDecoderLayer(config, name=f"layers.{i}") for i in range(config.decoder_layers)] + + self.dropout = keras.layers.Dropout(config.dropout) + + def get_embed_tokens(self): + return self.embed_tokens + + def set_embed_tokens(self, embed_tokens): + self.embed_tokens = embed_tokens + + @unpack_inputs + def call( + self, + input_ids: tf.Tensor | None = None, + inputs_embeds: tf.Tensor | None = None, + attention_mask: tf.Tensor | None = None, + position_ids: tf.Tensor | None = None, + encoder_hidden_states: tf.Tensor | None = None, + encoder_attention_mask: tf.Tensor | None = None, + head_mask: tf.Tensor | None = None, + cross_attn_head_mask: tf.Tensor | None = None, + past_key_values: Tuple[Tuple[tf.Tensor]] | None = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + training: bool = False, + ): + r""" + Args: + input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`): + Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you + provide it. + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input-ids) + attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding token indices. 
Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + position_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the + range `[0, config.max_position_embeddings - 1]`. + encoder_hidden_states (`tf.Tensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*): + Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention + of the decoder. + encoder_attention_mask (`tf.Tensor` of shape `(batch_size, encoder_sequence_length)`, *optional*): + Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values + selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): + Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + + cross_attn_head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): + Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`: + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + + past_key_values (`Tuple[Tuple[tf.Tensor]]` of length `config.n_layers` with each tuple having 2 tuples each of which has 2 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): + Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up + decoding. + + If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those + that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of + all `decoder_input_ids` of shape `(batch_size, sequence_length)`. + inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. + This is useful if you want more control over how to convert `input_ids` indices into associated vectors + than the model's internal embedding lookup matrix. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value + in the config will be used instead. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors + for more detail. This argument can be used only in eager mode, in graph mode the value in the config + will be used instead. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used + in eager mode, in graph mode the value will always be set to True. + training (`bool`, *optional*, defaults to `False`): + Whether or not to use the model in training mode (some modules like dropout modules have different + behaviors between training and evaluation). 
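# A standalone sketch, in the same additive-mask convention as `_make_causal_mask`
# and `_expand_mask` above (0.0 where attention is allowed, LARGE_NEGATIVE where it
# is blocked), of how a decoder combines its causal mask with a padding mask.
# `demo_decoder_attention_mask` is a hypothetical helper, not a transformers API.
import tensorflow as tf

LARGE_NEGATIVE = -1e8

def demo_decoder_attention_mask(padding_mask: tf.Tensor) -> tf.Tensor:
    # padding_mask: [bsz, seq_len], 1 for real tokens and 0 for padding (static shape assumed)
    seq_len = padding_mask.shape[1]
    lower_triangle = tf.linalg.band_part(tf.ones((seq_len, seq_len)), -1, 0)
    causal = (1.0 - lower_triangle) * LARGE_NEGATIVE                      # block attention to future positions
    padding = (1.0 - tf.cast(padding_mask, tf.float32)) * LARGE_NEGATIVE  # block attention to padded positions
    # broadcast to [bsz, 1, tgt_seq_len, src_seq_len], the shape the attention layers expect
    return causal[None, None, :, :] + padding[:, None, None, :]

# e.g. demo_decoder_attention_mask(tf.constant([[1, 1, 1, 0]])).shape == (1, 1, 4, 4)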
+ """ + + if input_ids is not None and inputs_embeds is not None: + raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time") + elif input_ids is not None: + input_shape = shape_list(input_ids) + elif inputs_embeds is not None: + input_shape = shape_list(inputs_embeds)[:-1] + else: + raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds") + + past_key_values_length = shape_list(past_key_values[0][0])[2] if past_key_values is not None else 0 + + # embed positions + if position_ids is None: + positions = self.embed_positions(input_shape, past_key_values_length) + else: + positions = self.embed_positions(input_shape, position_ids=position_ids) + + if inputs_embeds is None: + check_embeddings_within_bounds(input_ids, self.embed_tokens.input_dim) + inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale + + hidden_states = inputs_embeds + + # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] + if input_shape[-1] > 1: + combined_attention_mask = _make_causal_mask(input_shape, past_key_values_length=past_key_values_length) + else: + combined_attention_mask = _expand_mask( + tf.ones((input_shape[0], input_shape[1] + past_key_values_length)), tgt_len=input_shape[-1] + ) + + if attention_mask is not None: + combined_attention_mask = combined_attention_mask + _expand_mask(attention_mask, tgt_len=input_shape[-1]) + + if encoder_hidden_states is not None and encoder_attention_mask is not None: + # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] + encoder_attention_mask = _expand_mask(encoder_attention_mask, tgt_len=input_shape[-1]) + + hidden_states = self.dropout(hidden_states + positions, training=training) + + # decoder layers + all_hidden_states = () if output_hidden_states else None + all_self_attns = () if output_attentions else None + all_cross_attns = () if (output_attentions and encoder_hidden_states is not None) else None + present_key_values = () if use_cache else None + + # check if head_mask and cross_attn_head_mask have a correct number of layers specified if desired + for attn_name, attn_mask in [("head_mask", head_mask), ("cross_attn_head_mask", cross_attn_head_mask)]: + if attn_mask is not None: + tf.debugging.assert_equal( + shape_list(attn_mask)[0], + len(self.layers), + message=( + f"The {attn_name} should be specified for {len(self.layers)} layers, but it is for" + f" {shape_list(attn_mask)[0]}." 
+ ), + ) + + for idx, decoder_layer in enumerate(self.layers): + # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) + if output_hidden_states: + all_hidden_states += (hidden_states,) + dropout_probability = random.uniform(0, 1) + + if training and (dropout_probability < self.layerdrop): + continue + + past_key_value = past_key_values[idx] if past_key_values is not None else None + + hidden_states, layer_self_attn, layer_cross_attn, present_key_value = decoder_layer( + hidden_states, + attention_mask=combined_attention_mask, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_attention_mask, + layer_head_mask=head_mask[idx] if head_mask is not None else None, + cross_attn_layer_head_mask=cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None, + past_key_value=past_key_value, + ) + + if use_cache: + present_key_values += (present_key_value,) + + if output_attentions: + all_self_attns += (layer_self_attn,) + + if encoder_hidden_states is not None: + all_cross_attns += (layer_cross_attn,) + + if output_hidden_states: + all_hidden_states += (hidden_states,) + + if not return_dict: + return hidden_states, present_key_values, all_hidden_states, all_self_attns, all_cross_attns + else: + return TFBaseModelOutputWithPastAndCrossAttentions( + last_hidden_state=hidden_states, + past_key_values=present_key_values, + hidden_states=all_hidden_states, + attentions=all_self_attns, + cross_attentions=all_cross_attns, + ) + + def build(self, input_shape=None): + if self.built: + return + self.built = True + if getattr(self, "embed_positions", None) is not None: + with tf.name_scope(self.embed_positions.name): + self.embed_positions.build(None) + if getattr(self, "layers", None) is not None: + for layer in self.layers: + with tf.name_scope(layer.name): + layer.build(None) + + +@keras_serializable +class TFMarianMainLayer(keras.layers.Layer): + config_class = MarianConfig + + def __init__(self, config: MarianConfig, **kwargs): + super().__init__(**kwargs) + + self.config = config + self.shared = keras.layers.Embedding( + input_dim=config.vocab_size, + output_dim=config.d_model, + embeddings_initializer=keras.initializers.TruncatedNormal(stddev=self.config.init_std), + name="model.shared", + ) + # Additional attribute to specify the expected name scope of the layer (for loading/storing weights) + self.shared.load_weight_prefix = "model.shared" + + self.encoder = TFMarianEncoder(config, self.shared, name="encoder") + self.decoder = TFMarianDecoder(config, self.shared, name="decoder") + + def get_input_embeddings(self): + return self.shared + + def set_input_embeddings(self, new_embeddings): + self.shared = new_embeddings + self.encoder.embed_tokens = self.shared + self.decoder.embed_tokens = self.shared + + @unpack_inputs + def call( + self, + input_ids: tf.Tensor | None = None, + attention_mask: tf.Tensor | None = None, + decoder_input_ids: tf.Tensor | None = None, + decoder_attention_mask: tf.Tensor | None = None, + decoder_position_ids: tf.Tensor | None = None, + head_mask: tf.Tensor | None = None, + decoder_head_mask: tf.Tensor | None = None, + cross_attn_head_mask: tf.Tensor | None = None, + encoder_outputs: Optional[Union[Tuple, TFBaseModelOutput]] = None, + past_key_values: Tuple[Tuple[tf.Tensor]] = None, + inputs_embeds: tf.Tensor | None = None, + decoder_inputs_embeds: tf.Tensor | None = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: 
Optional[bool] = None, + training: bool = False, + **kwargs, + ): + if decoder_input_ids is None and decoder_inputs_embeds is None: + use_cache = False + + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + + if encoder_outputs is None: + encoder_outputs = self.encoder( + input_ids=input_ids, + attention_mask=attention_mask, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + training=training, + ) + # If the user passed a tuple for encoder_outputs, we wrap it in a TFBaseModelOutput when return_dict=True + elif return_dict and not isinstance(encoder_outputs, TFBaseModelOutput): + encoder_outputs = TFBaseModelOutput( + last_hidden_state=encoder_outputs[0], + hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, + attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, + ) + # If the user passed a TFBaseModelOutput for encoder_outputs, we wrap it in a tuple when return_dict=False + elif not return_dict and not isinstance(encoder_outputs, tuple): + encoder_outputs = encoder_outputs.to_tuple() + + decoder_outputs = self.decoder( + decoder_input_ids, + attention_mask=decoder_attention_mask, + position_ids=decoder_position_ids, + encoder_hidden_states=encoder_outputs[0], + encoder_attention_mask=attention_mask, + head_mask=decoder_head_mask, + cross_attn_head_mask=cross_attn_head_mask, + past_key_values=past_key_values, + inputs_embeds=decoder_inputs_embeds, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + training=training, + ) + + if not return_dict: + return decoder_outputs + encoder_outputs + + return TFSeq2SeqModelOutput( + last_hidden_state=decoder_outputs.last_hidden_state, + past_key_values=decoder_outputs.past_key_values, + decoder_hidden_states=decoder_outputs.hidden_states, + decoder_attentions=decoder_outputs.attentions, + cross_attentions=decoder_outputs.cross_attentions, + encoder_last_hidden_state=encoder_outputs.last_hidden_state, + encoder_hidden_states=encoder_outputs.hidden_states, + encoder_attentions=encoder_outputs.attentions, + ) + + def build(self, input_shape=None): + if self.built: + return + self.built = True + # The shared/tied weights expect to be in the model base namespace + # Adding "/" to the end (not the start!) of a tf.name_scope puts it in the root namespace rather than + # the current one. 
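+        # For illustration: load_weight_prefix and the layer name are both "model.shared" (set in __init__ above),
+        # so the scope below evaluates to "model.shared/model.shared/". The trailing "/" makes tf.name_scope treat
+        # it as an absolute (root-level) scope, so the shared embedding is created under the same path regardless
+        # of which outer layer triggers build() first.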
+ with tf.name_scope(self.shared.load_weight_prefix + "/" + self.shared.name + "/"): + self.shared.build(None) + if getattr(self, "encoder", None) is not None: + with tf.name_scope(self.encoder.name): + self.encoder.build(None) + if getattr(self, "decoder", None) is not None: + with tf.name_scope(self.decoder.name): + self.decoder.build(None) + + +@add_start_docstrings( + "The bare MARIAN Model outputting raw hidden-states without any specific head on top.", + MARIAN_START_DOCSTRING, +) +class TFMarianModel(TFMarianPreTrainedModel): + def __init__(self, config: MarianConfig, *inputs, **kwargs): + super().__init__(config, *inputs, **kwargs) + + self.model = TFMarianMainLayer(config, name="model") + + def get_encoder(self): + return self.model.encoder + + def get_decoder(self): + return self.model.decoder + + @unpack_inputs + @add_start_docstrings_to_model_forward(MARIAN_INPUTS_DOCSTRING.format("batch_size, sequence_length")) + @add_code_sample_docstrings( + checkpoint=_CHECKPOINT_FOR_DOC, + output_type=TFSeq2SeqModelOutput, + config_class=_CONFIG_FOR_DOC, + ) + def call( + self, + input_ids: tf.Tensor | None = None, + attention_mask: tf.Tensor | None = None, + decoder_input_ids: tf.Tensor | None = None, + decoder_attention_mask: tf.Tensor | None = None, + decoder_position_ids: tf.Tensor | None = None, + head_mask: tf.Tensor | None = None, + decoder_head_mask: tf.Tensor | None = None, + cross_attn_head_mask: tf.Tensor | None = None, + encoder_outputs: tf.Tensor | None = None, + past_key_values: Tuple[Tuple[tf.Tensor]] | None = None, + inputs_embeds: tf.Tensor | None = None, + decoder_inputs_embeds: tf.Tensor | None = None, + use_cache: bool | None = None, + output_attentions: bool | None = None, + output_hidden_states: bool | None = None, + return_dict: bool | None = None, + training: bool = False, + **kwargs, + ) -> Tuple[tf.Tensor] | TFSeq2SeqModelOutput: + outputs = self.model( + input_ids=input_ids, + attention_mask=attention_mask, + decoder_input_ids=decoder_input_ids, + decoder_attention_mask=decoder_attention_mask, + decoder_position_ids=decoder_position_ids, + head_mask=head_mask, + decoder_head_mask=decoder_head_mask, + cross_attn_head_mask=cross_attn_head_mask, + encoder_outputs=encoder_outputs, + past_key_values=past_key_values, + inputs_embeds=inputs_embeds, + decoder_inputs_embeds=decoder_inputs_embeds, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + training=training, + ) + + return outputs + + # Copied from transformers.models.bart.modeling_tf_bart.TFBartModel.serving_output + def serving_output(self, output): + pkv = tf.tuple(output.past_key_values)[1] if self.config.use_cache else None + dec_hs = tf.convert_to_tensor(output.decoder_hidden_states) if self.config.output_hidden_states else None + dec_attns = tf.convert_to_tensor(output.decoder_attentions) if self.config.output_attentions else None + cross_attns = tf.convert_to_tensor(output.cross_attentions) if self.config.output_attentions else None + enc_hs = tf.convert_to_tensor(output.encoder_hidden_states) if self.config.output_hidden_states else None + enc_attns = tf.convert_to_tensor(output.encoder_attentions) if self.config.output_attentions else None + + return TFSeq2SeqModelOutput( + last_hidden_state=output.last_hidden_state, + past_key_values=pkv, + decoder_hidden_states=dec_hs, + decoder_attentions=dec_attns, + cross_attentions=cross_attns, + encoder_last_hidden_state=output.encoder_last_hidden_state, + 
encoder_hidden_states=enc_hs, + encoder_attentions=enc_attns, + ) + + def build(self, input_shape=None): + if self.built: + return + self.built = True + if getattr(self, "model", None) is not None: + with tf.name_scope(self.model.name): + self.model.build(None) + + +# Copied from transformers.models.bart.modeling_tf_bart.BiasLayer +class BiasLayer(keras.layers.Layer): + """ + Bias as a layer. It is used for serialization purposes: `keras.Model.save_weights` stores on a per-layer basis, + so all weights have to be registered in a layer. + """ + + def __init__(self, shape, initializer, trainable, name, **kwargs): + super().__init__(name=name, **kwargs) + # Note: the name of this variable will NOT be scoped when serialized, i.e. it will not be in the format of + # "outer_layer/inner_layer/.../name:0". Instead, it will be "name:0". For further details, see: + # https://github.com/huggingface/transformers/pull/18833#issuecomment-1233090214 + self.bias = self.add_weight(name=name, shape=shape, initializer=initializer, trainable=trainable) + + def call(self, x): + return x + self.bias + + +@add_start_docstrings( + "The MARIAN Model with a language modeling head. Can be used for summarization.", + MARIAN_START_DOCSTRING, +) +class TFMarianMTModel(TFMarianPreTrainedModel, TFCausalLanguageModelingLoss): + _keys_to_ignore_on_load_unexpected = [ + r"model.encoder.embed_tokens.weight", + r"model.decoder.embed_tokens.weight", + ] + + def __init__(self, config, *inputs, **kwargs): + super().__init__(config, *inputs, **kwargs) + self.model = TFMarianMainLayer(config, name="model") + self.use_cache = config.use_cache + # final_bias_logits is registered as a buffer in pytorch, so not trainable for the sake of consistency. + self.bias_layer = BiasLayer( + name="final_logits_bias", shape=[1, config.vocab_size], initializer="zeros", trainable=False + ) + + def get_decoder(self): + return self.model.decoder + + def get_encoder(self): + return self.model.encoder + + def get_output_embeddings(self): + return self.get_input_embeddings() + + def set_output_embeddings(self, value): + self.set_input_embeddings(value) + + def get_bias(self): + return {"final_logits_bias": self.bias_layer.bias} + + def set_bias(self, value): + # Replaces the existing layers containing bias for correct (de)serialization. 
+ vocab_size = value["final_logits_bias"].shape[-1] + self.bias_layer = BiasLayer( + name="final_logits_bias", shape=[1, vocab_size], initializer="zeros", trainable=False + ) + self.bias_layer.bias.assign(value["final_logits_bias"]) + + @unpack_inputs + @add_start_docstrings_to_model_forward(MARIAN_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=TFSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC) + @add_end_docstrings(MARIAN_GENERATION_EXAMPLE) + def call( + self, + input_ids: tf.Tensor | None = None, + attention_mask: tf.Tensor | None = None, + decoder_input_ids: tf.Tensor | None = None, + decoder_attention_mask: tf.Tensor | None = None, + decoder_position_ids: tf.Tensor | None = None, + head_mask: tf.Tensor | None = None, + decoder_head_mask: tf.Tensor | None = None, + cross_attn_head_mask: tf.Tensor | None = None, + encoder_outputs: TFBaseModelOutput | None = None, + past_key_values: Tuple[Tuple[tf.Tensor]] | None = None, + inputs_embeds: tf.Tensor | None = None, + decoder_inputs_embeds: tf.Tensor | None = None, + use_cache: bool | None = None, + output_attentions: bool | None = None, + output_hidden_states: bool | None = None, + return_dict: bool | None = None, + labels: tf.Tensor | None = None, + training: bool = False, + ) -> Tuple[tf.Tensor] | TFSeq2SeqLMOutput: + r""" + labels (`tf.tensor` of shape `(batch_size, sequence_length)`, *optional*): + Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., + config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored + (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. + + Returns: + + """ + + if labels is not None: + labels = tf.where( + labels == self.config.pad_token_id, + tf.fill(shape_list(labels), tf.cast(-100, labels.dtype)), + labels, + ) + use_cache = False + if decoder_input_ids is None and decoder_inputs_embeds is None: + decoder_input_ids = shift_tokens_right( + labels, self.config.pad_token_id, self.config.decoder_start_token_id + ) + + outputs = self.model( + input_ids, + attention_mask=attention_mask, + decoder_input_ids=decoder_input_ids, + encoder_outputs=encoder_outputs, + decoder_attention_mask=decoder_attention_mask, + decoder_position_ids=decoder_position_ids, + head_mask=head_mask, + decoder_head_mask=decoder_head_mask, + cross_attn_head_mask=cross_attn_head_mask, + past_key_values=past_key_values, + inputs_embeds=inputs_embeds, + decoder_inputs_embeds=decoder_inputs_embeds, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + training=training, + ) + lm_logits = tf.matmul(outputs[0], self.model.shared.weights, transpose_b=True) + lm_logits = self.bias_layer(lm_logits) + masked_lm_loss = None if labels is None else self.hf_compute_loss(labels, lm_logits) + + if not return_dict: + output = (lm_logits,) + outputs[1:] + return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output + return TFSeq2SeqLMOutput( + loss=masked_lm_loss, + logits=lm_logits, + past_key_values=outputs.past_key_values, # index 1 of d outputs + decoder_hidden_states=outputs.decoder_hidden_states, # index 2 of d outputs + decoder_attentions=outputs.decoder_attentions, # index 3 of d outputs + cross_attentions=outputs.cross_attentions, # index 4 of d outputs + encoder_last_hidden_state=outputs.encoder_last_hidden_state, # index 0 of encoder outputs + encoder_hidden_states=outputs.encoder_hidden_states, # 1 of e out + 
encoder_attentions=outputs.encoder_attentions, # 2 of e out + ) + + # Copied from transformers.models.bart.modeling_tf_bart.TFBartForConditionalGeneration.serving_output + def serving_output(self, output): + pkv = tf.tuple(output.past_key_values)[1] if self.config.use_cache else None + dec_hs = tf.convert_to_tensor(output.decoder_hidden_states) if self.config.output_hidden_states else None + dec_attns = tf.convert_to_tensor(output.decoder_attentions) if self.config.output_attentions else None + cross_attns = tf.convert_to_tensor(output.cross_attentions) if self.config.output_attentions else None + enc_hs = tf.convert_to_tensor(output.encoder_hidden_states) if self.config.output_hidden_states else None + enc_attns = tf.convert_to_tensor(output.encoder_attentions) if self.config.output_attentions else None + + return TFSeq2SeqLMOutput( + logits=output.logits, + past_key_values=pkv, + decoder_hidden_states=dec_hs, + decoder_attentions=dec_attns, + cross_attentions=cross_attns, + encoder_last_hidden_state=output.encoder_last_hidden_state, + encoder_hidden_states=enc_hs, + encoder_attentions=enc_attns, + ) + + # Copied from transformers.models.bart.modeling_tf_bart.TFBartForConditionalGeneration.prepare_inputs_for_generation + def prepare_inputs_for_generation( + self, + decoder_input_ids, + past_key_values=None, + attention_mask=None, + decoder_attention_mask=None, + head_mask=None, + decoder_head_mask=None, + cross_attn_head_mask=None, + use_cache=None, + encoder_outputs=None, + **kwargs, + ): + # cut decoder_input_ids if past_key_values is used + if past_key_values is not None: + decoder_input_ids = decoder_input_ids[:, -1:] + + if decoder_attention_mask is not None: # xla + decoder_position_ids = tf.math.cumsum(decoder_attention_mask, axis=-1, exclusive=True)[:, -1:] + elif past_key_values is not None: # no xla + past_key_values + decoder_position_ids = past_key_values[0][0].shape[2] + else: # no xla + no past_key_values + decoder_position_ids = tf.range(decoder_input_ids.shape[1]) + + return { + "input_ids": None, # encoder_outputs is defined. input_ids not needed + "encoder_outputs": encoder_outputs, + "past_key_values": past_key_values, + "decoder_input_ids": decoder_input_ids, + "attention_mask": attention_mask, + "decoder_attention_mask": decoder_attention_mask, + "decoder_position_ids": decoder_position_ids, + "head_mask": head_mask, + "decoder_head_mask": decoder_head_mask, + "cross_attn_head_mask": cross_attn_head_mask, + "use_cache": use_cache, # change this to avoid caching (presumably for debugging) + } + + def prepare_decoder_input_ids_from_labels(self, labels: tf.Tensor): + return shift_tokens_right(labels, self.config.pad_token_id, self.config.decoder_start_token_id) + + def build(self, input_shape=None): + if self.built: + return + self.built = True + if getattr(self, "model", None) is not None: + with tf.name_scope(self.model.name): + self.model.build(None) + if getattr(self, "bias_layer", None) is not None: + with tf.name_scope(self.bias_layer.name): + self.bias_layer.build(None) diff --git a/venv/lib/python3.10/site-packages/transformers/models/marian/tokenization_marian.py b/venv/lib/python3.10/site-packages/transformers/models/marian/tokenization_marian.py new file mode 100644 index 0000000000000000000000000000000000000000..4f0d90b6f0dffeab448b9f3d34a32b407e02f829 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/marian/tokenization_marian.py @@ -0,0 +1,391 @@ +# Copyright 2020 The HuggingFace Team. All rights reserved. 
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import json
+import os
+import re
+import warnings
+from pathlib import Path
+from shutil import copyfile
+from typing import Any, Dict, List, Optional, Tuple, Union
+
+import sentencepiece
+
+from ...tokenization_utils import PreTrainedTokenizer
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+VOCAB_FILES_NAMES = {
+    "source_spm": "source.spm",
+    "target_spm": "target.spm",
+    "vocab": "vocab.json",
+    "target_vocab_file": "target_vocab.json",
+    "tokenizer_config_file": "tokenizer_config.json",
+}
+
+
+SPIECE_UNDERLINE = "▁"
+
+# Example URL https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/vocab.json
+
+
+class MarianTokenizer(PreTrainedTokenizer):
+    r"""
+    Construct a Marian tokenizer. Based on [SentencePiece](https://github.com/google/sentencepiece).
+
+    This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
+    this superclass for more information regarding those methods.
+
+    Args:
+        source_spm (`str`):
+            [SentencePiece](https://github.com/google/sentencepiece) file (generally has a .spm extension) that
+            contains the vocabulary for the source language.
+        target_spm (`str`):
+            [SentencePiece](https://github.com/google/sentencepiece) file (generally has a .spm extension) that
+            contains the vocabulary for the target language.
+        source_lang (`str`, *optional*):
+            A string representing the source language.
+        target_lang (`str`, *optional*):
+            A string representing the target language.
+        unk_token (`str`, *optional*, defaults to `"<unk>"`):
+            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
+            token instead.
+        eos_token (`str`, *optional*, defaults to `"</s>"`):
+            The end of sequence token.
+        pad_token (`str`, *optional*, defaults to `"<pad>"`):
+            The token used for padding, for example when batching sequences of different lengths.
+        model_max_length (`int`, *optional*, defaults to 512):
+            The maximum sentence length the model accepts.
+        additional_special_tokens (`List[str]`, *optional*, defaults to `["<eop>", "<eod>"]`):
+            Additional special tokens used by the tokenizer.
+        sp_model_kwargs (`dict`, *optional*):
+            Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for
+            SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things,
+            to set:
+
+            - `enable_sampling`: Enable subword regularization.
+            - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.
+
+              - `nbest_size = {0,1}`: No sampling is performed.
+              - `nbest_size > 1`: samples from the nbest_size results.
+              - `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice)
+                using forward-filtering-and-backward-sampling algorithm.
+
+            - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
+              BPE-dropout.
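+
+    For example (an illustrative sketch; the sampling values below are arbitrary, not defaults), subword
+    regularization can be enabled by forwarding these arguments to SentencePiece at load time:
+
+    ```python
+    >>> from transformers import MarianTokenizer
+
+    >>> tokenizer = MarianTokenizer.from_pretrained(
+    ...     "Helsinki-NLP/opus-mt-en-de", sp_model_kwargs={"enable_sampling": True, "nbest_size": -1, "alpha": 0.1}
+    ... )
+    >>> tokens = tokenizer.tokenize("I am a small frog.")  # segmentation is now sampled, so it can vary per call
+    ```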
+
+    Examples:
+
+    ```python
+    >>> from transformers import MarianForCausalLM, MarianTokenizer
+
+    >>> model = MarianForCausalLM.from_pretrained("Helsinki-NLP/opus-mt-en-de")
+    >>> tokenizer = MarianTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-de")
+    >>> src_texts = ["I am a small frog.", "Tom asked his teacher for advice."]
+    >>> tgt_texts = ["Ich bin ein kleiner Frosch.", "Tom bat seinen Lehrer um Rat."]  # optional
+    >>> inputs = tokenizer(src_texts, text_target=tgt_texts, return_tensors="pt", padding=True)
+
+    >>> outputs = model(**inputs)  # should work
+    ```"""
+
+    vocab_files_names = VOCAB_FILES_NAMES
+    model_input_names = ["input_ids", "attention_mask"]
+    language_code_re = re.compile(">>.+<<")  # type: re.Pattern
+
+    def __init__(
+        self,
+        source_spm,
+        target_spm,
+        vocab,
+        target_vocab_file=None,
+        source_lang=None,
+        target_lang=None,
+        unk_token="<unk>",
+        eos_token="</s>",
+        pad_token="<pad>",
+        model_max_length=512,
+        sp_model_kwargs: Optional[Dict[str, Any]] = None,
+        separate_vocabs=False,
+        **kwargs,
+    ) -> None:
+        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
+
+        assert Path(source_spm).exists(), f"cannot find spm source {source_spm}"
+
+        self.separate_vocabs = separate_vocabs
+        self.encoder = load_json(vocab)
+        if str(unk_token) not in self.encoder:
+            raise KeyError("<unk> token must be in the vocab")
+        assert str(pad_token) in self.encoder
+
+        if separate_vocabs:
+            self.target_encoder = load_json(target_vocab_file)
+            self.decoder = {v: k for k, v in self.target_encoder.items()}
+            self.supported_language_codes = []
+        else:
+            self.decoder = {v: k for k, v in self.encoder.items()}
+            self.supported_language_codes: list = [k for k in self.encoder if k.startswith(">>") and k.endswith("<<")]
+
+        self.source_lang = source_lang
+        self.target_lang = target_lang
+        self.spm_files = [source_spm, target_spm]
+
+        # load SentencePiece model for pre-processing
+        self.spm_source = load_spm(source_spm, self.sp_model_kwargs)
+        self.spm_target = load_spm(target_spm, self.sp_model_kwargs)
+        self.current_spm = self.spm_source
+        self.current_encoder = self.encoder
+
+        # Multilingual target side: default to using first supported language code.
+
+        self._setup_normalizer()
+
+        super().__init__(
+            # bos_token=bos_token, unused. Start decoding with config.decoder_start_token_id
+            source_lang=source_lang,
+            target_lang=target_lang,
+            unk_token=unk_token,
+            eos_token=eos_token,
+            pad_token=pad_token,
+            model_max_length=model_max_length,
+            sp_model_kwargs=self.sp_model_kwargs,
+            target_vocab_file=target_vocab_file,
+            separate_vocabs=separate_vocabs,
+            **kwargs,
+        )
+
+    def _setup_normalizer(self):
+        try:
+            from sacremoses import MosesPunctNormalizer
+
+            self.punc_normalizer = MosesPunctNormalizer(self.source_lang).normalize
+        except (ImportError, FileNotFoundError):
+            warnings.warn("Recommended: pip install sacremoses.")
+            self.punc_normalizer = lambda x: x
+
+    def normalize(self, x: str) -> str:
+        """Cover moses empty string edge case.
They return empty list for '' input!""" + return self.punc_normalizer(x) if x else "" + + def _convert_token_to_id(self, token): + return self.current_encoder.get(token, self.current_encoder[self.unk_token]) + + def remove_language_code(self, text: str): + """Remove language codes like >>fr<< before sentencepiece""" + match = self.language_code_re.match(text) + code: list = [match.group(0)] if match else [] + return code, self.language_code_re.sub("", text) + + def _tokenize(self, text: str) -> List[str]: + code, text = self.remove_language_code(text) + pieces = self.current_spm.encode(text, out_type=str) + return code + pieces + + def _convert_id_to_token(self, index: int) -> str: + """Converts an index (integer) in a token (str) using the decoder.""" + return self.decoder.get(index, self.unk_token) + + def batch_decode(self, sequences, **kwargs): + """ + Convert a list of lists of token ids into a list of strings by calling decode. + + Args: + sequences (`Union[List[int], List[List[int]], np.ndarray, torch.Tensor, tf.Tensor]`): + List of tokenized input ids. Can be obtained using the `__call__` method. + skip_special_tokens (`bool`, *optional*, defaults to `False`): + Whether or not to remove special tokens in the decoding. + clean_up_tokenization_spaces (`bool`, *optional*): + Whether or not to clean up the tokenization spaces. If `None`, will default to + `self.clean_up_tokenization_spaces` (available in the `tokenizer_config`). + use_source_tokenizer (`bool`, *optional*, defaults to `False`): + Whether or not to use the source tokenizer to decode sequences (only applicable in sequence-to-sequence + problems). + kwargs (additional keyword arguments, *optional*): + Will be passed to the underlying model specific decode method. + + Returns: + `List[str]`: The list of decoded sentences. + """ + return super().batch_decode(sequences, **kwargs) + + def decode(self, token_ids, **kwargs): + """ + Converts a sequence of ids in a string, using the tokenizer and vocabulary with options to remove special + tokens and clean up tokenization spaces. + + Similar to doing `self.convert_tokens_to_string(self.convert_ids_to_tokens(token_ids))`. + + Args: + token_ids (`Union[int, List[int], np.ndarray, torch.Tensor, tf.Tensor]`): + List of tokenized input ids. Can be obtained using the `__call__` method. + skip_special_tokens (`bool`, *optional*, defaults to `False`): + Whether or not to remove special tokens in the decoding. + clean_up_tokenization_spaces (`bool`, *optional*): + Whether or not to clean up the tokenization spaces. If `None`, will default to + `self.clean_up_tokenization_spaces` (available in the `tokenizer_config`). + use_source_tokenizer (`bool`, *optional*, defaults to `False`): + Whether or not to use the source tokenizer to decode sequences (only applicable in sequence-to-sequence + problems). + kwargs (additional keyword arguments, *optional*): + Will be passed to the underlying model specific decode method. + + Returns: + `str`: The decoded sentence. 
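+
+        Example (an illustrative sketch; the checkpoint and sentence reuse the ones from the class docstring, and
+        the exact decoded string is not asserted here):
+
+        ```python
+        >>> from transformers import MarianTokenizer
+
+        >>> tokenizer = MarianTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-de")
+        >>> ids = tokenizer("I am a small frog.").input_ids
+        >>> text = tokenizer.decode(ids, skip_special_tokens=True, use_source_tokenizer=True)  # source-side spm
+        ```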
+ """ + return super().decode(token_ids, **kwargs) + + def convert_tokens_to_string(self, tokens: List[str]) -> str: + """Uses source spm if _decode_use_source_tokenizer is True, and target spm otherwise""" + sp_model = self.spm_source if self._decode_use_source_tokenizer else self.spm_target + current_sub_tokens = [] + out_string = "" + for token in tokens: + # make sure that special tokens are not decoded using sentencepiece model + if token in self.all_special_tokens: + out_string += sp_model.decode_pieces(current_sub_tokens) + token + " " + current_sub_tokens = [] + else: + current_sub_tokens.append(token) + out_string += sp_model.decode_pieces(current_sub_tokens) + out_string = out_string.replace(SPIECE_UNDERLINE, " ") + return out_string.strip() + + def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]: + """Build model inputs from a sequence by appending eos_token_id.""" + if token_ids_1 is None: + return token_ids_0 + [self.eos_token_id] + # We don't expect to process pairs, but leave the pair logic for API consistency + return token_ids_0 + token_ids_1 + [self.eos_token_id] + + def _switch_to_input_mode(self): + self.current_spm = self.spm_source + self.current_encoder = self.encoder + + def _switch_to_target_mode(self): + self.current_spm = self.spm_target + if self.separate_vocabs: + self.current_encoder = self.target_encoder + + @property + def vocab_size(self) -> int: + return len(self.encoder) + + def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: + if not os.path.isdir(save_directory): + logger.error(f"Vocabulary path ({save_directory}) should be a directory") + return + saved_files = [] + + if self.separate_vocabs: + out_src_vocab_file = os.path.join( + save_directory, + (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab"], + ) + out_tgt_vocab_file = os.path.join( + save_directory, + (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["target_vocab_file"], + ) + save_json(self.encoder, out_src_vocab_file) + save_json(self.target_encoder, out_tgt_vocab_file) + saved_files.append(out_src_vocab_file) + saved_files.append(out_tgt_vocab_file) + else: + out_vocab_file = os.path.join( + save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab"] + ) + save_json(self.encoder, out_vocab_file) + saved_files.append(out_vocab_file) + + for spm_save_filename, spm_orig_path, spm_model in zip( + [VOCAB_FILES_NAMES["source_spm"], VOCAB_FILES_NAMES["target_spm"]], + self.spm_files, + [self.spm_source, self.spm_target], + ): + spm_save_path = os.path.join( + save_directory, (filename_prefix + "-" if filename_prefix else "") + spm_save_filename + ) + if os.path.abspath(spm_orig_path) != os.path.abspath(spm_save_path) and os.path.isfile(spm_orig_path): + copyfile(spm_orig_path, spm_save_path) + saved_files.append(spm_save_path) + elif not os.path.isfile(spm_orig_path): + with open(spm_save_path, "wb") as fi: + content_spiece_model = spm_model.serialized_model_proto() + fi.write(content_spiece_model) + saved_files.append(spm_save_path) + + return tuple(saved_files) + + def get_vocab(self) -> Dict: + return self.get_src_vocab() + + def get_src_vocab(self): + return dict(self.encoder, **self.added_tokens_encoder) + + def get_tgt_vocab(self): + return dict(self.target_encoder, **self.added_tokens_decoder) + + def __getstate__(self) -> Dict: + state = self.__dict__.copy() + state.update( + {k: None for k in ["spm_source", "spm_target", 
"current_spm", "punc_normalizer", "target_vocab_file"]} + ) + return state + + def __setstate__(self, d: Dict) -> None: + self.__dict__ = d + + # for backward compatibility + if not hasattr(self, "sp_model_kwargs"): + self.sp_model_kwargs = {} + + self.spm_source, self.spm_target = (load_spm(f, self.sp_model_kwargs) for f in self.spm_files) + self.current_spm = self.spm_source + self._setup_normalizer() + + def num_special_tokens_to_add(self, *args, **kwargs): + """Just EOS""" + return 1 + + def _special_token_mask(self, seq): + all_special_ids = set(self.all_special_ids) # call it once instead of inside list comp + all_special_ids.remove(self.unk_token_id) # is only sometimes special + return [1 if x in all_special_ids else 0 for x in seq] + + def get_special_tokens_mask( + self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False + ) -> List[int]: + """Get list where entries are [1] if a token is [eos] or [pad] else 0.""" + if already_has_special_tokens: + return self._special_token_mask(token_ids_0) + elif token_ids_1 is None: + return self._special_token_mask(token_ids_0) + [1] + else: + return self._special_token_mask(token_ids_0 + token_ids_1) + [1] + + +def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor: + spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs) + spm.Load(path) + return spm + + +def save_json(data, path: str) -> None: + with open(path, "w") as f: + json.dump(data, f, indent=2) + + +def load_json(path: str) -> Union[Dict, List]: + with open(path, "r") as f: + return json.load(f) diff --git a/venv/lib/python3.10/site-packages/transformers/models/nougat/__init__.py b/venv/lib/python3.10/site-packages/transformers/models/nougat/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..3cc8bbddf9e9ca6446b5a9c5f73c2cc4eb27975e --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/nougat/__init__.py @@ -0,0 +1,63 @@ +# Copyright 2023 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from typing import TYPE_CHECKING + +from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_vision_available + + +_import_structure = { + "processing_nougat": ["NougatProcessor"], +} + +try: + if not is_tokenizers_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["tokenization_nougat_fast"] = ["NougatTokenizerFast"] + +try: + if not is_vision_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["image_processing_nougat"] = ["NougatImageProcessor"] + + +if TYPE_CHECKING: + from .processing_nougat import NougatProcessor + + try: + if not is_tokenizers_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .tokenization_nougat_fast import NougatTokenizerFast + + try: + if not is_vision_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .image_processing_nougat import NougatImageProcessor + + +else: + import sys + + sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure) diff --git a/venv/lib/python3.10/site-packages/transformers/models/nougat/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/nougat/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3493f27936ad6d0494cb463cdc2e9c63a29e366d Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/nougat/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/nougat/__pycache__/convert_nougat_to_hf.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/nougat/__pycache__/convert_nougat_to_hf.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f5d2bc3aef07d30288422fe1828c8a2996f90022 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/nougat/__pycache__/convert_nougat_to_hf.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/nougat/__pycache__/image_processing_nougat.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/nougat/__pycache__/image_processing_nougat.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0f86663e61733080472e4c03b74f9ddb5d10ee01 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/nougat/__pycache__/image_processing_nougat.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/nougat/__pycache__/processing_nougat.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/nougat/__pycache__/processing_nougat.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..23ebcd80c4c9f32f9ca5ad44e3bfe349f00e98ba Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/nougat/__pycache__/processing_nougat.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/nougat/__pycache__/tokenization_nougat_fast.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/nougat/__pycache__/tokenization_nougat_fast.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4f0cf83d26e43b96d7c59d35a715d5efb9472c29 Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/transformers/models/nougat/__pycache__/tokenization_nougat_fast.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/nougat/convert_nougat_to_hf.py b/venv/lib/python3.10/site-packages/transformers/models/nougat/convert_nougat_to_hf.py new file mode 100644 index 0000000000000000000000000000000000000000..ecc74fdb5fbe8f0e4ad49069d7a739934ccc2330 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/nougat/convert_nougat_to_hf.py @@ -0,0 +1,282 @@ +# coding=utf-8 +# Copyright 2023 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Convert Nougat checkpoints using the original `nougat` library. URL: +https://github.com/facebookresearch/nougat/tree/main""" + +import argparse + +import torch +from huggingface_hub import hf_hub_download +from nougat import NougatModel +from nougat.dataset.rasterize import rasterize_paper +from nougat.utils.checkpoint import get_checkpoint +from PIL import Image + +from transformers import ( + DonutSwinConfig, + DonutSwinModel, + MBartConfig, + MBartForCausalLM, + NougatImageProcessor, + NougatProcessor, + NougatTokenizerFast, + VisionEncoderDecoderModel, +) + + +def get_configs(model): + original_config = model.config + + encoder_config = DonutSwinConfig( + image_size=original_config.input_size, + patch_size=4, + depths=original_config.encoder_layer, + num_heads=[4, 8, 16, 32], + window_size=original_config.window_size, + embed_dim=128, + ) + decoder_config = MBartConfig( + is_decoder=True, + is_encoder_decoder=False, + add_cross_attention=True, + decoder_layers=original_config.decoder_layer, + max_position_embeddings=original_config.max_position_embeddings, + vocab_size=len( + model.decoder.tokenizer + ), # several special tokens are added to the vocab of XLMRobertaTokenizer, see repo on the hub (added_tokens.json) + scale_embedding=True, + add_final_layer_norm=True, + tie_word_embeddings=False, + ) + + return encoder_config, decoder_config + + +# Copied from transformers.models.donut.convert_donut_to_pytorch.rename_key +def rename_key(name): + if "encoder.model" in name: + name = name.replace("encoder.model", "encoder") + if "decoder.model" in name: + name = name.replace("decoder.model", "decoder") + if "patch_embed.proj" in name: + name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection") + if "patch_embed.norm" in name: + name = name.replace("patch_embed.norm", "embeddings.norm") + if name.startswith("encoder"): + if "layers" in name: + name = "encoder." 
+ name + if "attn.proj" in name: + name = name.replace("attn.proj", "attention.output.dense") + if "attn" in name and "mask" not in name: + name = name.replace("attn", "attention.self") + if "norm1" in name: + name = name.replace("norm1", "layernorm_before") + if "norm2" in name: + name = name.replace("norm2", "layernorm_after") + if "mlp.fc1" in name: + name = name.replace("mlp.fc1", "intermediate.dense") + if "mlp.fc2" in name: + name = name.replace("mlp.fc2", "output.dense") + + if name == "encoder.norm.weight": + name = "encoder.layernorm.weight" + if name == "encoder.norm.bias": + name = "encoder.layernorm.bias" + + return name + + +# Copied from transformers.models.donut.convert_donut_to_pytorch.convert_state_dict +def convert_state_dict(orig_state_dict, model): + for key in orig_state_dict.copy().keys(): + val = orig_state_dict.pop(key) + + if "qkv" in key: + key_split = key.split(".") + layer_num = int(key_split[3]) + block_num = int(key_split[5]) + dim = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size + + if "weight" in key: + orig_state_dict[ + f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight" + ] = val[:dim, :] + orig_state_dict[ + f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight" + ] = val[dim : dim * 2, :] + orig_state_dict[ + f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight" + ] = val[-dim:, :] + else: + orig_state_dict[ + f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias" + ] = val[:dim] + orig_state_dict[ + f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias" + ] = val[dim : dim * 2] + orig_state_dict[ + f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias" + ] = val[-dim:] + elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]: + # HuggingFace implementation doesn't use attn_mask buffer + # and model doesn't use final LayerNorms for the encoder + pass + else: + orig_state_dict[rename_key(key)] = val + + return orig_state_dict + + +def convert_nougat_checkpoint(model_tag, pytorch_dump_folder_path=None, push_to_hub=False): + # load original model + checkpoint_path = get_checkpoint(None, model_tag) + original_model = NougatModel.from_pretrained(checkpoint_path) + original_model.eval() + + # load HuggingFace model + encoder_config, decoder_config = get_configs(original_model) + encoder = DonutSwinModel(encoder_config) + decoder = MBartForCausalLM(decoder_config) + model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder) + model.eval() + + state_dict = original_model.state_dict() + new_state_dict = convert_state_dict(state_dict, model) + model.load_state_dict(new_state_dict) + + # verify results on PDF + filepath = hf_hub_download(repo_id="ysharma/nougat", filename="input/nougat.pdf", repo_type="space") + images = rasterize_paper(pdf=filepath, return_pil=True) + image = Image.open(images[0]) + + tokenizer_file = checkpoint_path / "tokenizer.json" + tokenizer = NougatTokenizerFast(tokenizer_file=str(tokenizer_file)) + tokenizer.pad_token = "" + tokenizer.bos_token = "" + tokenizer.eos_token = "" + tokenizer.unk_token = "" + tokenizer.model_max_length = original_model.config.max_length + + size = {"height": original_model.config.input_size[0], "width": original_model.config.input_size[1]} + image_processor = NougatImageProcessor( + do_align_long_axis=original_model.config.align_long_axis, + size=size, + ) 
+ processor = NougatProcessor(image_processor=image_processor, tokenizer=tokenizer) + + # verify pixel_values + pixel_values = processor(image, return_tensors="pt").pixel_values + original_pixel_values = original_model.encoder.prepare_input(image).unsqueeze(0) + + assert torch.allclose(original_pixel_values, pixel_values) + + # verify patch embeddings + original_patch_embed = original_model.encoder.model.patch_embed(pixel_values) + patch_embeddings, _ = model.encoder.embeddings(pixel_values) + assert torch.allclose(original_patch_embed, patch_embeddings) + + # verify encoder hidden states + original_last_hidden_state = original_model.encoder(pixel_values) + last_hidden_state = model.encoder(pixel_values).last_hidden_state + assert torch.allclose(original_last_hidden_state, last_hidden_state, atol=1e-2) + + # NOTE original model does not use tied weights for embeddings of decoder + original_embeddings = original_model.decoder.model.model.decoder.embed_tokens + embeddings = model.decoder.model.decoder.embed_tokens + assert torch.allclose(original_embeddings.weight, embeddings.weight, atol=1e-3) + + # verify decoder hidden states + prompt = "hello world" + decoder_input_ids = original_model.decoder.tokenizer( + prompt, add_special_tokens=False, return_tensors="pt" + ).input_ids + decoder_attention_mask = torch.ones_like(decoder_input_ids) + original_logits = original_model( + image_tensors=pixel_values, decoder_input_ids=decoder_input_ids, attention_mask=decoder_attention_mask + ).logits + logits = model( + pixel_values, + decoder_input_ids=decoder_input_ids[:, :-1], + decoder_attention_mask=decoder_attention_mask[:, :-1], + ).logits + assert torch.allclose(original_logits, logits, atol=1e-3) + + # verify generation + outputs = model.generate( + pixel_values, + min_length=1, + max_length=30, + pad_token_id=tokenizer.pad_token_id, + eos_token_id=tokenizer.eos_token_id, + use_cache=True, + bad_words_ids=[ + [tokenizer.unk_token_id], + ], + return_dict_in_generate=True, + do_sample=False, + ) + generated = tokenizer.batch_decode(outputs.sequences, skip_special_tokens=True)[0] + + if model_tag == "0.1.0-base": + expected_generation = "# Nougat: Neural Optical Understanding for Academic Documents\n\nLukas Blecher\n\nCorrespondence to: lblec" + elif model_tag == "0.1.0-small": + expected_generation = ( + "# Nougat: Neural Optical Understanding for Academic Documents\n\nLukas Blecher\n\nCorrespondence to: lble" + ) + else: + raise ValueError(f"Unexpected model tag: {model_tag}") + + assert generated == expected_generation + print("Looks ok!") + + if pytorch_dump_folder_path is not None: + print(f"Saving model and processor to {pytorch_dump_folder_path}") + model.save_pretrained(pytorch_dump_folder_path) + processor.save_pretrained(pytorch_dump_folder_path) + + if push_to_hub: + tag_to_name = {"0.1.0-base": "nougat-base", "0.1.0-small": "nougat-small"} + model_name = tag_to_name[model_tag] + + model.push_to_hub(f"facebook/{model_name}") + processor.push_to_hub(f"facebook/{model_name}") + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + # Required parameters + parser.add_argument( + "--model_tag", + default="0.1.0-base", + required=False, + type=str, + choices=["0.1.0-base", "0.1.0-small"], + help="Tag of the original model you'd like to convert.", + ) + parser.add_argument( + "--pytorch_dump_folder_path", + default=None, + required=False, + type=str, + help="Path to the output PyTorch model directory.", + ) + parser.add_argument( + "--push_to_hub", + action="store_true", + 
help="Whether or not to push the converted model and processor to the 🤗 hub.", + ) + + args = parser.parse_args() + convert_nougat_checkpoint(args.model_tag, args.pytorch_dump_folder_path, args.push_to_hub) diff --git a/venv/lib/python3.10/site-packages/transformers/models/nougat/image_processing_nougat.py b/venv/lib/python3.10/site-packages/transformers/models/nougat/image_processing_nougat.py new file mode 100644 index 0000000000000000000000000000000000000000..49913d5baa080ba3797b4383983d22f15691004c --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/nougat/image_processing_nougat.py @@ -0,0 +1,532 @@ +# coding=utf-8 +# Copyright 2023 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Image processor class for Nougat.""" + +from typing import Dict, List, Optional, Union + +import numpy as np + +from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict +from ...image_transforms import ( + get_resize_output_image_size, + pad, + resize, + to_channel_dimension_format, + to_pil_image, +) +from ...image_utils import ( + IMAGENET_DEFAULT_MEAN, + IMAGENET_DEFAULT_STD, + ChannelDimension, + ImageInput, + PILImageResampling, + get_image_size, + infer_channel_dimension_format, + is_scaled_image, + make_list_of_images, + to_numpy_array, + valid_images, + validate_kwargs, + validate_preprocess_arguments, +) +from ...utils import TensorType, logging +from ...utils.import_utils import is_cv2_available, is_vision_available + + +logger = logging.get_logger(__name__) + + +if is_cv2_available(): + pass + + +if is_vision_available(): + import PIL + + +class NougatImageProcessor(BaseImageProcessor): + r""" + Constructs a Nougat image processor. + + Args: + do_crop_margin (`bool`, *optional*, defaults to `True`): + Whether to crop the image margins. + do_resize (`bool`, *optional*, defaults to `True`): + Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by + `do_resize` in the `preprocess` method. + size (`Dict[str, int]` *optional*, defaults to `{"height": 896, "width": 672}`): + Size of the image after resizing. Can be overridden by `size` in the `preprocess` method. + resample (`PILImageResampling`, *optional*, defaults to `Resampling.BILINEAR`): + Resampling filter to use if resizing the image. Can be overridden by `resample` in the `preprocess` method. + do_thumbnail (`bool`, *optional*, defaults to `True`): + Whether to resize the image using thumbnail method. + do_align_long_axis (`bool`, *optional*, defaults to `False`): + Whether to align the long axis of the image with the long axis of `size` by rotating by 90 degrees. + do_pad (`bool`, *optional*, defaults to `True`): + Whether to pad the images to the largest image size in the batch. + do_rescale (`bool`, *optional*, defaults to `True`): + Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale` + parameter in the `preprocess` method. 
+ rescale_factor (`int` or `float`, *optional*, defaults to `1/255`): + Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the + `preprocess` method. + do_normalize (`bool`, *optional*, defaults to `True`): + Whether to normalize the image. Can be overridden by `do_normalize` in the `preprocess` method. + image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_DEFAULT_MEAN`): + Mean to use if normalizing the image. This is a float or list of floats the length of the number of + channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method. + image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_DEFAULT_STD`): + Image standard deviation. + """ + + model_input_names = ["pixel_values"] + + def __init__( + self, + do_crop_margin: bool = True, + do_resize: bool = True, + size: Dict[str, int] = None, + resample: PILImageResampling = PILImageResampling.BILINEAR, + do_thumbnail: bool = True, + do_align_long_axis: bool = False, + do_pad: bool = True, + do_rescale: bool = True, + rescale_factor: Union[int, float] = 1 / 255, + do_normalize: bool = True, + image_mean: Optional[Union[float, List[float]]] = None, + image_std: Optional[Union[float, List[float]]] = None, + **kwargs, + ) -> None: + super().__init__(**kwargs) + + size = size if size is not None else {"height": 896, "width": 672} + size = get_size_dict(size) + + self.do_crop_margin = do_crop_margin + self.do_resize = do_resize + self.size = size + self.resample = resample + self.do_thumbnail = do_thumbnail + self.do_align_long_axis = do_align_long_axis + self.do_pad = do_pad + self.do_rescale = do_rescale + self.rescale_factor = rescale_factor + self.do_normalize = do_normalize + self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN + self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD + self._valid_processor_keys = [ + "images", + "do_crop_margin", + "do_resize", + "size", + "resample", + "do_thumbnail", + "do_align_long_axis", + "do_pad", + "do_rescale", + "rescale_factor", + "do_normalize", + "image_mean", + "image_std", + "return_tensors", + "data_format", + "input_data_format", + ] + + def python_find_non_zero(self, image: np.array): + """This is a reimplementation of a findNonZero function equivalent to cv2.""" + non_zero_indices = np.column_stack(np.nonzero(image)) + idxvec = non_zero_indices[:, [1, 0]] + idxvec = idxvec.reshape(-1, 1, 2) + return idxvec + + def python_bounding_rect(self, coordinates): + """This is a reimplementation of a BoundingRect function equivalent to cv2.""" + min_values = np.min(coordinates, axis=(0, 1)).astype(int) + max_values = np.max(coordinates, axis=(0, 1)).astype(int) + x_min, y_min = min_values[0], min_values[1] + width = max_values[0] - x_min + 1 + height = max_values[1] - y_min + 1 + return x_min, y_min, width, height + + def crop_margin( + self, + image: np.array, + gray_threshold: int = 200, + data_format: Optional[ChannelDimension] = None, + input_data_format: Optional[Union[str, ChannelDimension]] = None, + ) -> np.array: + """ + Crops the margin of the image. Gray pixels are considered margin (i.e., pixels with a value below the + threshold). + + Args: + image (`np.array`): + The image to be cropped. + gray_threshold (`int`, *optional*, defaults to `200`) + Value below which pixels are considered to be gray. + data_format (`ChannelDimension`, *optional*): + The channel dimension format of the output image. 
If unset, will use the inferred format from the + input. + input_data_format (`ChannelDimension`, *optional*): + The channel dimension format of the input image. If unset, will use the inferred format from the input. + """ + if input_data_format is None: + input_data_format = infer_channel_dimension_format(image) + + image = to_pil_image(image, input_data_format=input_data_format) + data = np.array(image.convert("L")).astype(np.uint8) + max_val = data.max() + min_val = data.min() + if max_val == min_val: + image = np.array(image) + image = ( + to_channel_dimension_format(image, data_format, input_data_format) + if data_format is not None + else image + ) + return image + data = (data - min_val) / (max_val - min_val) * 255 + gray = data < gray_threshold + coords = self.python_find_non_zero(gray) + x_min, y_min, width, height = self.python_bounding_rect(coords) + image = image.crop((x_min, y_min, x_min + width, y_min + height)) + image = np.array(image).astype(np.uint8) + image = to_channel_dimension_format(image, input_data_format, ChannelDimension.LAST) + + image = ( + to_channel_dimension_format(image, data_format, input_data_format) if data_format is not None else image + ) + + return image + + # Copied from transformers.models.donut.image_processing_donut.DonutImageProcessor.align_long_axis + def align_long_axis( + self, + image: np.ndarray, + size: Dict[str, int], + data_format: Optional[Union[str, ChannelDimension]] = None, + input_data_format: Optional[Union[str, ChannelDimension]] = None, + ) -> np.ndarray: + """ + Align the long axis of the image to the longest axis of the specified size. + + Args: + image (`np.ndarray`): + The image to be aligned. + size (`Dict[str, int]`): + The size `{"height": h, "width": w}` to align the long axis to. + data_format (`str` or `ChannelDimension`, *optional*): + The data format of the output image. If unset, the same format as the input image is used. + input_data_format (`ChannelDimension` or `str`, *optional*): + The channel dimension format of the input image. If not provided, it will be inferred. + + Returns: + `np.ndarray`: The aligned image. + """ + input_height, input_width = get_image_size(image, channel_dim=input_data_format) + output_height, output_width = size["height"], size["width"] + + if (output_width < output_height and input_width > input_height) or ( + output_width > output_height and input_width < input_height + ): + image = np.rot90(image, 3) + + if data_format is not None: + image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) + + return image + + def pad_image( + self, + image: np.ndarray, + size: Dict[str, int], + data_format: Optional[Union[str, ChannelDimension]] = None, + input_data_format: Optional[Union[str, ChannelDimension]] = None, + ) -> np.ndarray: + """ + Pad the image to the specified size at the top, bottom, left and right. + + Args: + image (`np.ndarray`): + The image to be padded. + size (`Dict[str, int]`): + The size `{"height": h, "width": w}` to pad the image to. + data_format (`str` or `ChannelDimension`, *optional*): + The data format of the output image. If unset, the same format as the input image is used. + input_data_format (`ChannelDimension` or `str`, *optional*): + The channel dimension format of the input image. If not provided, it will be inferred. 
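For reference, a minimal NumPy-only sketch of the margin-cropping logic implemented by `crop_margin` above (normalize the grayscale page, threshold it, take the bounding box of the non-gray pixels). The helper name `crop_margin_sketch` and the toy page are illustrative, not part of the library.

```python
import numpy as np

def crop_margin_sketch(gray: np.ndarray, gray_threshold: int = 200) -> np.ndarray:
    """`gray` is an (H, W) uint8 grayscale page; returns the content crop."""
    data = gray.astype(np.float32)
    max_val, min_val = data.max(), data.min()
    if max_val == min_val:                      # uniform page: nothing to crop
        return gray
    data = (data - min_val) / (max_val - min_val) * 255
    mask = data < gray_threshold                # True where a pixel counts as content
    ys, xs = np.nonzero(mask)
    return gray[ys.min() : ys.max() + 1, xs.min() : xs.max() + 1]

page = np.full((64, 64), 255, dtype=np.uint8)   # white page ...
page[20:40, 10:30] = 0                          # ... with a dark text block
print(crop_margin_sketch(page).shape)           # (20, 20)
```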
+ """ + output_height, output_width = size["height"], size["width"] + input_height, input_width = get_image_size(image, channel_dim=input_data_format) + + delta_width = output_width - input_width + delta_height = output_height - input_height + + pad_top = delta_height // 2 + pad_left = delta_width // 2 + + pad_bottom = delta_height - pad_top + pad_right = delta_width - pad_left + + padding = ((pad_top, pad_bottom), (pad_left, pad_right)) + return pad(image, padding, data_format=data_format, input_data_format=input_data_format) + + # Copied from transformers.models.donut.image_processing_donut.DonutImageProcessor.thumbnail + def thumbnail( + self, + image: np.ndarray, + size: Dict[str, int], + resample: PILImageResampling = PILImageResampling.BICUBIC, + data_format: Optional[Union[str, ChannelDimension]] = None, + input_data_format: Optional[Union[str, ChannelDimension]] = None, + **kwargs, + ) -> np.ndarray: + """ + Resize the image to make a thumbnail. The image is resized so that no dimension is larger than any + corresponding dimension of the specified size. + + Args: + image (`np.ndarray`): + The image to be resized. + size (`Dict[str, int]`): + The size `{"height": h, "width": w}` to resize the image to. + resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`): + The resampling filter to use. + data_format (`Optional[Union[str, ChannelDimension]]`, *optional*): + The data format of the output image. If unset, the same format as the input image is used. + input_data_format (`ChannelDimension` or `str`, *optional*): + The channel dimension format of the input image. If not provided, it will be inferred. + """ + input_height, input_width = get_image_size(image, channel_dim=input_data_format) + output_height, output_width = size["height"], size["width"] + + # We always resize to the smallest of either the input or output size. + height = min(input_height, output_height) + width = min(input_width, output_width) + + if height == input_height and width == input_width: + return image + + if input_height > input_width: + width = int(input_width * height / input_height) + elif input_width > input_height: + height = int(input_height * width / input_width) + + return resize( + image, + size=(height, width), + resample=resample, + reducing_gap=2.0, + data_format=data_format, + input_data_format=input_data_format, + **kwargs, + ) + + # Copied from transformers.models.donut.image_processing_donut.DonutImageProcessor.resize + def resize( + self, + image: np.ndarray, + size: Dict[str, int], + resample: PILImageResampling = PILImageResampling.BICUBIC, + data_format: Optional[Union[str, ChannelDimension]] = None, + input_data_format: Optional[Union[str, ChannelDimension]] = None, + **kwargs, + ) -> np.ndarray: + """ + Resizes `image` to `(height, width)` specified by `size` using the PIL library. + + Args: + image (`np.ndarray`): + Image to resize. + size (`Dict[str, int]`): + Size of the output image. + resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`): + Resampling filter to use when resiizing the image. + data_format (`str` or `ChannelDimension`, *optional*): + The channel dimension format of the image. If not provided, it will be the same as the input image. + input_data_format (`ChannelDimension` or `str`, *optional*): + The channel dimension format of the input image. If not provided, it will be inferred. 
+ """ + size = get_size_dict(size) + shortest_edge = min(size["height"], size["width"]) + output_size = get_resize_output_image_size( + image, size=shortest_edge, default_to_square=False, input_data_format=input_data_format + ) + resized_image = resize( + image, + size=output_size, + resample=resample, + data_format=data_format, + input_data_format=input_data_format, + **kwargs, + ) + return resized_image + + def preprocess( + self, + images: ImageInput, + do_crop_margin: bool = None, + do_resize: bool = None, + size: Dict[str, int] = None, + resample: PILImageResampling = None, + do_thumbnail: bool = None, + do_align_long_axis: bool = None, + do_pad: bool = None, + do_rescale: bool = None, + rescale_factor: Union[int, float] = None, + do_normalize: bool = None, + image_mean: Optional[Union[float, List[float]]] = None, + image_std: Optional[Union[float, List[float]]] = None, + return_tensors: Optional[Union[str, TensorType]] = None, + data_format: Optional[ChannelDimension] = ChannelDimension.FIRST, + input_data_format: Optional[Union[str, ChannelDimension]] = None, + **kwargs, + ) -> PIL.Image.Image: + """ + Preprocess an image or batch of images. + + Args: + images (`ImageInput`): + Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. + do_crop_margin (`bool`, *optional*, defaults to `self.do_crop_margin`): + Whether to crop the image margins. + do_resize (`bool`, *optional*, defaults to `self.do_resize`): + Whether to resize the image. + size (`Dict[str, int]`, *optional*, defaults to `self.size`): + Size of the image after resizing. Shortest edge of the image is resized to min(size["height"], + size["width"]) with the longest edge resized to keep the input aspect ratio. + resample (`int`, *optional*, defaults to `self.resample`): + Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only + has an effect if `do_resize` is set to `True`. + do_thumbnail (`bool`, *optional*, defaults to `self.do_thumbnail`): + Whether to resize the image using thumbnail method. + do_align_long_axis (`bool`, *optional*, defaults to `self.do_align_long_axis`): + Whether to align the long axis of the image with the long axis of `size` by rotating by 90 degrees. + do_pad (`bool`, *optional*, defaults to `self.do_pad`): + Whether to pad the images to the largest image size in the batch. + do_rescale (`bool`, *optional*, defaults to `self.do_rescale`): + Whether to rescale the image by the specified scale `rescale_factor`. + rescale_factor (`int` or `float`, *optional*, defaults to `self.rescale_factor`): + Scale factor to use if rescaling the image. + do_normalize (`bool`, *optional*, defaults to `self.do_normalize`): + Whether to normalize the image. + image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`): + Image mean to use for normalization. + image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`): + Image standard deviation to use for normalization. + return_tensors (`str` or `TensorType`, *optional*): + The type of tensors to return. Can be one of: + - Unset: Return a list of `np.ndarray`. + - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`. + - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. + - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. + - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`. 
+ data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): + The channel dimension format for the output image. Can be one of: + - `ChannelDimension.FIRST`: image in (num_channels, height, width) format. + - `ChannelDimension.LAST`: image in (height, width, num_channels) format. + - Unset: defaults to the channel dimension format of the input image. + input_data_format (`ChannelDimension` or `str`, *optional*): + The channel dimension format for the input image. If unset, the channel dimension format is inferred + from the input image. Can be one of: + - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. + - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. + - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. + """ + do_crop_margin = do_crop_margin if do_crop_margin is not None else self.do_crop_margin + do_resize = do_resize if do_resize is not None else self.do_resize + size = size if size is not None else self.size + resample = resample if resample is not None else self.resample + do_thumbnail = do_thumbnail if do_thumbnail is not None else self.do_thumbnail + do_align_long_axis = do_align_long_axis if do_align_long_axis is not None else self.do_align_long_axis + do_pad = do_pad if do_pad is not None else self.do_pad + do_rescale = do_rescale if do_rescale is not None else self.do_rescale + rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor + do_normalize = do_normalize if do_normalize is not None else self.do_normalize + image_mean = image_mean if image_mean is not None else self.image_mean + image_std = image_std if image_std is not None else self.image_std + + images = make_list_of_images(images) + + validate_kwargs(captured_kwargs=kwargs.keys(), valid_processor_keys=self._valid_processor_keys) + + if not valid_images(images): + raise ValueError( + "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " + "torch.Tensor, tf.Tensor or jax.ndarray." + ) + validate_preprocess_arguments( + do_rescale=do_rescale, + rescale_factor=rescale_factor, + do_normalize=do_normalize, + image_mean=image_mean, + image_std=image_std, + do_pad=do_pad, + size_divisibility=size, # There is no pad divisibility in this processor, but pad requires the size arg. + do_resize=do_resize, + size=size, + resample=resample, + ) + + # All transformations expect numpy arrays. + images = [to_numpy_array(image) for image in images] + + if is_scaled_image(images[0]) and do_rescale: + logger.warning_once( + "It looks like you are trying to rescale already rescaled images. If the input" + " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again." + ) + + if input_data_format is None: + # We assume that all images have the same channel dimension format. 
+ input_data_format = infer_channel_dimension_format(images[0]) + + if do_crop_margin: + images = [self.crop_margin(image, input_data_format=input_data_format) for image in images] + + if do_align_long_axis: + images = [self.align_long_axis(image, size=size, input_data_format=input_data_format) for image in images] + + if do_resize: + images = [ + self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format) + for image in images + ] + + if do_thumbnail: + images = [self.thumbnail(image=image, size=size, input_data_format=input_data_format) for image in images] + + if do_pad: + images = [self.pad_image(image=image, size=size, input_data_format=input_data_format) for image in images] + + if do_rescale: + images = [ + self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format) + for image in images + ] + + if do_normalize: + images = [ + self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format) + for image in images + ] + + images = [ + to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images + ] + + data = {"pixel_values": images} + return BatchFeature(data=data, tensor_type=return_tensors) diff --git a/venv/lib/python3.10/site-packages/transformers/models/nougat/processing_nougat.py b/venv/lib/python3.10/site-packages/transformers/models/nougat/processing_nougat.py new file mode 100644 index 0000000000000000000000000000000000000000..8f94c6718ba6600ebebef4c0e1fdb9865c609e5e --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/nougat/processing_nougat.py @@ -0,0 +1,160 @@ +# coding=utf-8 +# Copyright 2023 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Processor class for Nougat. +""" + +from typing import Dict, List, Optional, Union + +from transformers.tokenization_utils_base import PreTokenizedInput, TextInput, TruncationStrategy + +from ...processing_utils import ProcessorMixin +from ...utils import PaddingStrategy, TensorType + + +class NougatProcessor(ProcessorMixin): + r""" + Constructs a Nougat processor which wraps a Nougat image processor and a Nougat tokenizer into a single processor. + + [`NougatProcessor`] offers all the functionalities of [`NougatImageProcessor`] and [`NougatTokenizerFast`]. See the + [`~NougatProcessor.__call__`] and [`~NougatProcessor.decode`] for more information. + + Args: + image_processor ([`NougatImageProcessor`]): + An instance of [`NougatImageProcessor`]. The image processor is a required input. + tokenizer ([`NougatTokenizerFast`]): + An instance of [`NougatTokenizerFast`]. The tokenizer is a required input. 
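As a quick sanity check of the preprocessing pipeline defined in `image_processing_nougat.py` above (crop margin, align long axis, resize, thumbnail, pad, rescale, normalize), here is a hedged usage sketch. It assumes Pillow and PyTorch are installed; the blank test page and the printed shape are illustrative.

```python
import numpy as np
from PIL import Image
from transformers import NougatImageProcessor

processor = NougatImageProcessor()             # defaults: size={"height": 896, "width": 672}
page = Image.fromarray(np.full((1000, 700, 3), 255, dtype=np.uint8))

batch = processor(images=page, return_tensors="pt")
print(batch["pixel_values"].shape)             # expected: torch.Size([1, 3, 896, 672])
```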
+ """ + + attributes = ["image_processor", "tokenizer"] + image_processor_class = "AutoImageProcessor" + tokenizer_class = "AutoTokenizer" + + def __init__(self, image_processor, tokenizer): + super().__init__(image_processor, tokenizer) + self.current_processor = self.image_processor + + def __call__( + self, + images=None, + text=None, + do_crop_margin: bool = None, + do_resize: bool = None, + size: Dict[str, int] = None, + resample: "PILImageResampling" = None, # noqa: F821 + do_thumbnail: bool = None, + do_align_long_axis: bool = None, + do_pad: bool = None, + do_rescale: bool = None, + rescale_factor: Union[int, float] = None, + do_normalize: bool = None, + image_mean: Optional[Union[float, List[float]]] = None, + image_std: Optional[Union[float, List[float]]] = None, + data_format: Optional["ChannelDimension"] = "channels_first", # noqa: F821 + input_data_format: Optional[Union[str, "ChannelDimension"]] = None, # noqa: F821 + text_pair: Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None, + text_target: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, + text_pair_target: Optional[ + Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] + ] = None, + add_special_tokens: bool = True, + padding: Union[bool, str, PaddingStrategy] = False, + truncation: Union[bool, str, TruncationStrategy] = None, + max_length: Optional[int] = None, + stride: int = 0, + is_split_into_words: bool = False, + pad_to_multiple_of: Optional[int] = None, + return_tensors: Optional[Union[str, TensorType]] = None, + return_token_type_ids: Optional[bool] = None, + return_attention_mask: Optional[bool] = None, + return_overflowing_tokens: bool = False, + return_special_tokens_mask: bool = False, + return_offsets_mapping: bool = False, + return_length: bool = False, + verbose: bool = True, + ): + if images is None and text is None: + raise ValueError("You need to specify either an `images` or `text` input to process.") + + if images is not None: + inputs = self.image_processor( + images, + do_crop_margin=do_crop_margin, + do_resize=do_resize, + size=size, + resample=resample, + do_thumbnail=do_thumbnail, + do_align_long_axis=do_align_long_axis, + do_pad=do_pad, + do_rescale=do_rescale, + rescale_factor=rescale_factor, + do_normalize=do_normalize, + image_mean=image_mean, + image_std=image_std, + return_tensors=return_tensors, + data_format=data_format, + input_data_format=input_data_format, + ) + if text is not None: + encodings = self.tokenizer( + text, + text_pair=text_pair, + text_target=text_target, + text_pair_target=text_pair_target, + add_special_tokens=add_special_tokens, + padding=padding, + truncation=truncation, + max_length=max_length, + stride=stride, + is_split_into_words=is_split_into_words, + pad_to_multiple_of=pad_to_multiple_of, + return_tensors=return_tensors, + return_token_type_ids=return_token_type_ids, + return_attention_mask=return_attention_mask, + return_overflowing_tokens=return_overflowing_tokens, + return_special_tokens_mask=return_special_tokens_mask, + return_offsets_mapping=return_offsets_mapping, + return_length=return_length, + verbose=verbose, + ) + + if text is None: + return inputs + elif images is None: + return encodings + else: + inputs["labels"] = encodings["input_ids"] + return inputs + + def batch_decode(self, *args, **kwargs): + """ + This method forwards all its arguments to NougatTokenizer's [`~PreTrainedTokenizer.batch_decode`]. 
Please refer + to the docstring of this method for more information. + """ + return self.tokenizer.batch_decode(*args, **kwargs) + + def decode(self, *args, **kwargs): + """ + This method forwards all its arguments to NougatTokenizer's [`~PreTrainedTokenizer.decode`]. Please refer to + the docstring of this method for more information. + """ + return self.tokenizer.decode(*args, **kwargs) + + def post_process_generation(self, *args, **kwargs): + """ + This method forwards all its arguments to NougatTokenizer's [`~PreTrainedTokenizer.post_process_generation`]. + Please refer to the docstring of this method for more information. + """ + return self.tokenizer.post_process_generation(*args, **kwargs) diff --git a/venv/lib/python3.10/site-packages/transformers/models/roc_bert/__init__.py b/venv/lib/python3.10/site-packages/transformers/models/roc_bert/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..344bcfa41654d1bc09795386c7a940b9184a509b --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/roc_bert/__init__.py @@ -0,0 +1,90 @@ +# Copyright 2020 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import TYPE_CHECKING + +from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available + + +_import_structure = { + "configuration_roc_bert": ["ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoCBertConfig"], + "tokenization_roc_bert": ["RoCBertTokenizer"], +} + +try: + if not is_tokenizers_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + pass + +try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["modeling_roc_bert"] = [ + "ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST", + "RoCBertForCausalLM", + "RoCBertForMaskedLM", + "RoCBertForMultipleChoice", + "RoCBertForPreTraining", + "RoCBertForQuestionAnswering", + "RoCBertForSequenceClassification", + "RoCBertForTokenClassification", + "RoCBertLayer", + "RoCBertModel", + "RoCBertPreTrainedModel", + "load_tf_weights_in_roc_bert", + ] + +if TYPE_CHECKING: + from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig + from .tokenization_roc_bert import RoCBertTokenizer + + try: + if not is_tokenizers_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + raise OptionalDependencyNotAvailable() + + try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .modeling_roc_bert import ( + ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST, + RoCBertForCausalLM, + RoCBertForMaskedLM, + RoCBertForMultipleChoice, + RoCBertForPreTraining, + RoCBertForQuestionAnswering, + RoCBertForSequenceClassification, + RoCBertForTokenClassification, + RoCBertLayer, + RoCBertModel, + RoCBertPreTrainedModel, + load_tf_weights_in_roc_bert, 
+ ) + + +else: + import sys + + sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) diff --git a/venv/lib/python3.10/site-packages/transformers/models/roc_bert/__pycache__/configuration_roc_bert.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/roc_bert/__pycache__/configuration_roc_bert.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5b3c4a74952196734853152f2d9ed955a01676e0 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/roc_bert/__pycache__/configuration_roc_bert.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/roc_bert/__pycache__/tokenization_roc_bert.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/roc_bert/__pycache__/tokenization_roc_bert.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4f0ae6ca981b2f08b0f9824f979dc3e3a831e6be Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/roc_bert/__pycache__/tokenization_roc_bert.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/roc_bert/configuration_roc_bert.py b/venv/lib/python3.10/site-packages/transformers/models/roc_bert/configuration_roc_bert.py new file mode 100644 index 0000000000000000000000000000000000000000..26f74ee4c462d01367c4fbfee19bff73819de636 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/roc_bert/configuration_roc_bert.py @@ -0,0 +1,163 @@ +# coding=utf-8 +# Copyright 2022 WeChatAI and The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" RoCBert model configuration""" + +from ...configuration_utils import PretrainedConfig +from ...utils import logging + + +logger = logging.get_logger(__name__) + + +from ..deprecated._archive_maps import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 + + +class RoCBertConfig(PretrainedConfig): + r""" + This is the configuration class to store the configuration of a [`RoCBertModel`]. It is used to instantiate a + RoCBert model according to the specified arguments, defining the model architecture. Instantiating a configuration + with the defaults will yield a similar configuration to that of the RoCBert + [weiweishi/roc-bert-base-zh](https://huggingface.co/weiweishi/roc-bert-base-zh) architecture. + + Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the + documentation from [`PretrainedConfig`] for more information. + + + Args: + vocab_size (`int`, *optional*, defaults to 30522): + Vocabulary size of the RoCBert model. Defines the number of different tokens that can be represented by the + `inputs_ids` passed when calling [`RoCBertModel`]. + hidden_size (`int`, *optional*, defaults to 768): + Dimension of the encoder layers and the pooler layer. 
+ num_hidden_layers (`int`, *optional*, defaults to 12): + Number of hidden layers in the Transformer encoder. + num_attention_heads (`int`, *optional*, defaults to 12): + Number of attention heads for each attention layer in the Transformer encoder. + intermediate_size (`int`, *optional*, defaults to 3072): + Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. + hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): + The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, + `"relu"`, `"selu"` and `"gelu_new"` are supported. + hidden_dropout_prob (`float`, *optional*, defaults to 0.1): + The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. + attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1): + The dropout ratio for the attention probabilities. + max_position_embeddings (`int`, *optional*, defaults to 512): + The maximum sequence length that this model might ever be used with. Typically set this to something large + just in case (e.g., 512 or 1024 or 2048). + type_vocab_size (`int`, *optional*, defaults to 2): + The vocabulary size of the `token_type_ids` passed when calling [`RoCBertModel`]. + initializer_range (`float`, *optional*, defaults to 0.02): + The standard deviation of the truncated_normal_initializer for initializing all weight matrices. + layer_norm_eps (`float`, *optional*, defaults to 1e-12): + The epsilon used by the layer normalization layers. + is_decoder (`bool`, *optional*, defaults to `False`): + Whether the model is used as a decoder or not. If `False`, the model is used as an encoder. + use_cache (`bool`, *optional*, defaults to `True`): + Whether or not the model should return the last key/values attentions (not used by all models). Only + relevant if `config.is_decoder=True`. + position_embedding_type (`str`, *optional*, defaults to `"absolute"`): + Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For + positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to + [Self-Attention with Relative Position Representations (Shaw et al.)](https://arxiv.org/abs/1803.02155). + For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models + with Better Relative Position Embeddings (Huang et al.)](https://arxiv.org/abs/2009.13658). + classifier_dropout (`float`, *optional*): + The dropout ratio for the classification head. + enable_pronunciation (`bool`, *optional*, defaults to `True`): + Whether or not the model use pronunciation embed when training. + enable_shape (`bool`, *optional*, defaults to `True`): + Whether or not the model use shape embed when training. + pronunciation_embed_dim (`int`, *optional*, defaults to 768): + Dimension of the pronunciation_embed. + pronunciation_vocab_size (`int`, *optional*, defaults to 910): + Pronunciation Vocabulary size of the RoCBert model. Defines the number of different tokens that can be + represented by the `input_pronunciation_ids` passed when calling [`RoCBertModel`]. + shape_embed_dim (`int`, *optional*, defaults to 512): + Dimension of the shape_embed. + shape_vocab_size (`int`, *optional*, defaults to 24858): + Shape Vocabulary size of the RoCBert model. Defines the number of different tokens that can be represented + by the `input_shape_ids` passed when calling [`RoCBertModel`]. 
+ concat_input (`bool`, *optional*, defaults to `True`): + Defines the way of merging the shape_embed, pronunciation_embed and word_embed, if the value is true, + output_embed = torch.cat((word_embed, shape_embed, pronunciation_embed), -1), else output_embed = + (word_embed + shape_embed + pronunciation_embed) / 3 + Example: + + ```python + >>> from transformers import RoCBertModel, RoCBertConfig + + >>> # Initializing a RoCBert weiweishi/roc-bert-base-zh style configuration + >>> configuration = RoCBertConfig() + + >>> # Initializing a model from the weiweishi/roc-bert-base-zh style configuration + >>> model = RoCBertModel(configuration) + + >>> # Accessing the model configuration + >>> configuration = model.config + ```""" + + model_type = "roc_bert" + + def __init__( + self, + vocab_size=30522, + hidden_size=768, + num_hidden_layers=12, + num_attention_heads=12, + intermediate_size=3072, + hidden_act="gelu", + hidden_dropout_prob=0.1, + attention_probs_dropout_prob=0.1, + max_position_embeddings=512, + type_vocab_size=2, + initializer_range=0.02, + layer_norm_eps=1e-12, + use_cache=True, + pad_token_id=0, + position_embedding_type="absolute", + classifier_dropout=None, + enable_pronunciation=True, + enable_shape=True, + pronunciation_embed_dim=768, + pronunciation_vocab_size=910, + shape_embed_dim=512, + shape_vocab_size=24858, + concat_input=True, + **kwargs, + ): + self.vocab_size = vocab_size + self.max_position_embeddings = max_position_embeddings + self.hidden_size = hidden_size + self.num_hidden_layers = num_hidden_layers + self.num_attention_heads = num_attention_heads + self.intermediate_size = intermediate_size + self.hidden_act = hidden_act + self.hidden_dropout_prob = hidden_dropout_prob + self.attention_probs_dropout_prob = attention_probs_dropout_prob + self.initializer_range = initializer_range + self.type_vocab_size = type_vocab_size + self.layer_norm_eps = layer_norm_eps + self.use_cache = use_cache + self.enable_pronunciation = enable_pronunciation + self.enable_shape = enable_shape + self.pronunciation_embed_dim = pronunciation_embed_dim + self.pronunciation_vocab_size = pronunciation_vocab_size + self.shape_embed_dim = shape_embed_dim + self.shape_vocab_size = shape_vocab_size + self.concat_input = concat_input + self.position_embedding_type = position_embedding_type + self.classifier_dropout = classifier_dropout + super().__init__(pad_token_id=pad_token_id, **kwargs) diff --git a/venv/lib/python3.10/site-packages/transformers/models/roc_bert/modeling_roc_bert.py b/venv/lib/python3.10/site-packages/transformers/models/roc_bert/modeling_roc_bert.py new file mode 100644 index 0000000000000000000000000000000000000000..51850c9af1d5c05b88262263b4fbe948eb5db779 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/roc_bert/modeling_roc_bert.py @@ -0,0 +1,1985 @@ +# coding=utf-8 +# Copyright 2022 WeChatAI The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
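To make the `concat_input` behaviour described above concrete, here is a toy PyTorch sketch of the two merge modes. The dimensions are illustrative, not the real 768/512 sizes, and `map_inputs_layer` simply mirrors the projection the embedding module applies after concatenation.

```python
import torch
import torch.nn as nn

hidden, shape_dim, pron_dim, seq = 8, 4, 4, 5
word = torch.randn(1, seq, hidden)

# concat_input=False: the three embeddings (all hidden-sized here) are averaged
shape_h = torch.randn(1, seq, hidden)
pron_h = torch.randn(1, seq, hidden)
avg_embed = (word + shape_h + pron_h) / 3

# concat_input=True: concatenate along the feature axis, then project back
shape_c = torch.randn(1, seq, shape_dim)
pron_c = torch.randn(1, seq, pron_dim)
map_inputs_layer = nn.Linear(hidden + shape_dim + pron_dim, hidden)
cat_embed = map_inputs_layer(torch.cat((word, shape_c, pron_c), dim=-1))

print(avg_embed.shape, cat_embed.shape)   # both torch.Size([1, 5, 8])
```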
+""" PyTorch RoCBert model.""" + +import math +import os +from typing import List, Optional, Tuple, Union + +import torch +import torch.utils.checkpoint +from torch import nn +from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss + +from ...activations import ACT2FN +from ...modeling_outputs import ( + BaseModelOutputWithPastAndCrossAttentions, + BaseModelOutputWithPoolingAndCrossAttentions, + CausalLMOutputWithCrossAttentions, + MaskedLMOutput, + MultipleChoiceModelOutput, + QuestionAnsweringModelOutput, + SequenceClassifierOutput, + TokenClassifierOutput, +) +from ...modeling_utils import PreTrainedModel +from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer +from ...utils import ( + add_code_sample_docstrings, + add_start_docstrings, + add_start_docstrings_to_model_forward, + logging, + replace_return_docstrings, +) +from .configuration_roc_bert import RoCBertConfig + + +logger = logging.get_logger(__name__) + +_CHECKPOINT_FOR_DOC = "weiweishi/roc-bert-base-zh" +_CONFIG_FOR_DOC = "RoCBertConfig" + +# Base model docstring +_EXPECTED_OUTPUT_SHAPE = [1, 8, 768] + +# Token Classification output +_CHECKPOINT_FOR_TOKEN_CLASSIFICATION = "ArthurZ/dummy-rocbert-ner" +_TOKEN_CLASS_EXPECTED_OUTPUT = ["S-EVENT", "S-FAC", "I-ORDINAL", "I-ORDINAL", "E-ORG", "E-LANGUAGE", "E-ORG", "E-ORG", "E-ORG", "E-ORG", "I-EVENT", "S-TIME", "S-TIME", "E-LANGUAGE", "S-TIME", "E-DATE", "I-ORDINAL", "E-QUANTITY", "E-LANGUAGE", "S-TIME", "B-ORDINAL", "S-PRODUCT", "E-LANGUAGE", "E-LANGUAGE", "E-ORG", "E-LOC", "S-TIME", "I-ORDINAL", "S-FAC", "O", "S-GPE", "I-EVENT", "S-GPE", "E-LANGUAGE", "E-ORG", "S-EVENT", "S-FAC", "S-FAC", "S-FAC", "E-ORG", "S-FAC", "E-ORG", "S-GPE"] # fmt: skip +_TOKEN_CLASS_EXPECTED_LOSS = 3.62 + +# SequenceClassification docstring +_CHECKPOINT_FOR_SEQUENCE_CLASSIFICATION = "ArthurZ/dummy-rocbert-seq" +_SEQ_CLASS_EXPECTED_OUTPUT = "'financial news'" +_SEQ_CLASS_EXPECTED_LOSS = 2.31 + +# QuestionAsnwering docstring +_CHECKPOINT_FOR_QA = "ArthurZ/dummy-rocbert-qa" +_QA_EXPECTED_OUTPUT = "''" +_QA_EXPECTED_LOSS = 3.75 +_QA_TARGET_START_INDEX = 14 +_QA_TARGET_END_INDEX = 15 + +# Maske language modeling + +from ..deprecated._archive_maps import ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 + + +# Copied from transformers.models.bert.modeling_bert.load_tf_weights_in_bert with bert->roc_bert +def load_tf_weights_in_roc_bert(model, config, tf_checkpoint_path): + """Load tf checkpoints in a pytorch model.""" + try: + import re + + import numpy as np + import tensorflow as tf + except ImportError: + logger.error( + "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see " + "https://www.tensorflow.org/install/ for installation instructions." 
+ ) + raise + tf_path = os.path.abspath(tf_checkpoint_path) + logger.info(f"Converting TensorFlow checkpoint from {tf_path}") + # Load weights from TF model + init_vars = tf.train.list_variables(tf_path) + names = [] + arrays = [] + for name, shape in init_vars: + logger.info(f"Loading TF weight {name} with shape {shape}") + array = tf.train.load_variable(tf_path, name) + names.append(name) + arrays.append(array) + + for name, array in zip(names, arrays): + name = name.split("/") + # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v + # which are not required for using pretrained model + if any( + n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"] + for n in name + ): + logger.info(f"Skipping {'/'.join(name)}") + continue + pointer = model + for m_name in name: + if re.fullmatch(r"[A-Za-z]+_\d+", m_name): + scope_names = re.split(r"_(\d+)", m_name) + else: + scope_names = [m_name] + if scope_names[0] == "kernel" or scope_names[0] == "gamma": + pointer = getattr(pointer, "weight") + elif scope_names[0] == "output_bias" or scope_names[0] == "beta": + pointer = getattr(pointer, "bias") + elif scope_names[0] == "output_weights": + pointer = getattr(pointer, "weight") + elif scope_names[0] == "squad": + pointer = getattr(pointer, "classifier") + else: + try: + pointer = getattr(pointer, scope_names[0]) + except AttributeError: + logger.info(f"Skipping {'/'.join(name)}") + continue + if len(scope_names) >= 2: + num = int(scope_names[1]) + pointer = pointer[num] + if m_name[-11:] == "_embeddings": + pointer = getattr(pointer, "weight") + elif m_name == "kernel": + array = np.transpose(array) + try: + if pointer.shape != array.shape: + raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched") + except ValueError as e: + e.args += (pointer.shape, array.shape) + raise + logger.info(f"Initialize PyTorch weight {name}") + pointer.data = torch.from_numpy(array) + return model + + +class RoCBertEmbeddings(nn.Module): + """Construct the embeddings from word, position, shape, pronunciation and token_type embeddings.""" + + def __init__(self, config): + super().__init__() + self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) + self.pronunciation_embed = nn.Embedding( + config.pronunciation_vocab_size, config.pronunciation_embed_dim, padding_idx=config.pad_token_id + ) + self.shape_embed = nn.Embedding( + config.shape_vocab_size, config.shape_embed_dim, padding_idx=config.pad_token_id + ) + self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) + self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) + + self.enable_pronunciation = config.enable_pronunciation + self.enable_shape = config.enable_shape + + if config.concat_input: + input_dim = config.hidden_size + if self.enable_pronunciation: + pronunciation_dim = config.pronunciation_embed_dim + input_dim += pronunciation_dim + if self.enable_shape: + shape_dim = config.shape_embed_dim + input_dim += shape_dim + self.map_inputs_layer = torch.nn.Linear(input_dim, config.hidden_size) + else: + self.map_inputs_layer = None + + # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load + # any TensorFlow checkpoint file + self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + # position_ids (1, len 
position emb) is contiguous in memory and exported when serialized + self.register_buffer( + "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False + ) + self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") + self.register_buffer( + "token_type_ids", + torch.zeros(self.position_ids.size(), dtype=torch.long, device=self.position_ids.device), + persistent=False, + ) + + def forward( + self, + input_ids=None, + input_shape_ids=None, + input_pronunciation_ids=None, + token_type_ids=None, + position_ids=None, + inputs_embeds=None, + past_key_values_length=0, + ): + if input_ids is not None: + input_shape = input_ids.size() + else: + input_shape = inputs_embeds.size()[:-1] + + seq_length = input_shape[1] + + if position_ids is None: + position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length] + + # Setting the token_type_ids to the registered buffer in constructor where it is all zeros, which usually occurs + # when its auto-generated, registered buffer helps users when tracing the model without passing token_type_ids, solves + # issue #5664 + if token_type_ids is None: + if hasattr(self, "token_type_ids"): + buffered_token_type_ids = self.token_type_ids[:, :seq_length] + buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length) + token_type_ids = buffered_token_type_ids_expanded + else: + token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device) + + if self.map_inputs_layer is None: + if inputs_embeds is None: + inputs_embeds = self.word_embeddings(input_ids) + token_type_embeddings = self.token_type_embeddings(token_type_ids) + embeddings = inputs_embeds + token_type_embeddings + if self.position_embedding_type == "absolute": + position_embeddings = self.position_embeddings(position_ids) + embeddings += position_embeddings + embeddings = self.LayerNorm(embeddings) + embeddings = self.dropout(embeddings) + + denominator = 1 + embedding_in = torch.clone(embeddings) + if self.enable_shape and input_shape_ids is not None: + embedding_shape = self.shape_embed(input_shape_ids) + embedding_in += embedding_shape + denominator += 1 + if self.enable_pronunciation and input_pronunciation_ids is not None: + embedding_pronunciation = self.pronunciation_embed(input_pronunciation_ids) + embedding_in += embedding_pronunciation + denominator += 1 + + embedding_in /= denominator + return embedding_in + else: + if inputs_embeds is None: + inputs_embeds = self.word_embeddings(input_ids) # embedding_word + device = inputs_embeds.device + + embedding_in = torch.clone(inputs_embeds) + if self.enable_shape: + if input_shape_ids is None: + input_shape_ids = torch.zeros(input_shape, dtype=torch.long, device=device) + embedding_shape = self.shape_embed(input_shape_ids) + embedding_in = torch.cat((embedding_in, embedding_shape), -1) + if self.enable_pronunciation: + if input_pronunciation_ids is None: + input_pronunciation_ids = torch.zeros(input_shape, dtype=torch.long, device=device) + embedding_pronunciation = self.pronunciation_embed(input_pronunciation_ids) + embedding_in = torch.cat((embedding_in, embedding_pronunciation), -1) + + embedding_in = self.map_inputs_layer(embedding_in) # batch_size * seq_len * hidden_dim + + token_type_embeddings = self.token_type_embeddings(token_type_ids) + embedding_in += token_type_embeddings + if self.position_embedding_type == "absolute": + position_embeddings = self.position_embeddings(position_ids) 
+ embedding_in += position_embeddings + + embedding_in = self.LayerNorm(embedding_in) + embedding_in = self.dropout(embedding_in) + return embedding_in + + +# Copied from transformers.models.bert.modeling_bert.BertSelfAttention with Bert->RoCBert +class RoCBertSelfAttention(nn.Module): + def __init__(self, config, position_embedding_type=None): + super().__init__() + if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): + raise ValueError( + f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention " + f"heads ({config.num_attention_heads})" + ) + + self.num_attention_heads = config.num_attention_heads + self.attention_head_size = int(config.hidden_size / config.num_attention_heads) + self.all_head_size = self.num_attention_heads * self.attention_head_size + + self.query = nn.Linear(config.hidden_size, self.all_head_size) + self.key = nn.Linear(config.hidden_size, self.all_head_size) + self.value = nn.Linear(config.hidden_size, self.all_head_size) + + self.dropout = nn.Dropout(config.attention_probs_dropout_prob) + self.position_embedding_type = position_embedding_type or getattr( + config, "position_embedding_type", "absolute" + ) + if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": + self.max_position_embeddings = config.max_position_embeddings + self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size) + + self.is_decoder = config.is_decoder + + def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor: + new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) + x = x.view(new_x_shape) + return x.permute(0, 2, 1, 3) + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.FloatTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + encoder_attention_mask: Optional[torch.FloatTensor] = None, + past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, + output_attentions: Optional[bool] = False, + ) -> Tuple[torch.Tensor]: + mixed_query_layer = self.query(hidden_states) + + # If this is instantiated as a cross-attention module, the keys + # and values come from an encoder; the attention mask needs to be + # such that the encoder's padding tokens are not attended to. 
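A standalone sketch of what `transpose_for_scores` above does to the activations, with made-up sizes: the hidden axis is split into heads and moved in front of the sequence axis so the attention matmuls run per head.

```python
import torch

batch, seq, num_heads, head_dim = 2, 7, 12, 64
hidden = num_heads * head_dim                 # 768

x = torch.randn(batch, seq, hidden)
x = x.view(batch, seq, num_heads, head_dim)   # split the hidden axis
x = x.permute(0, 2, 1, 3)                     # (batch, heads, seq, head_dim)
print(x.shape)                                # torch.Size([2, 12, 7, 64])
```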
+ is_cross_attention = encoder_hidden_states is not None + + if is_cross_attention and past_key_value is not None: + # reuse k,v, cross_attentions + key_layer = past_key_value[0] + value_layer = past_key_value[1] + attention_mask = encoder_attention_mask + elif is_cross_attention: + key_layer = self.transpose_for_scores(self.key(encoder_hidden_states)) + value_layer = self.transpose_for_scores(self.value(encoder_hidden_states)) + attention_mask = encoder_attention_mask + elif past_key_value is not None: + key_layer = self.transpose_for_scores(self.key(hidden_states)) + value_layer = self.transpose_for_scores(self.value(hidden_states)) + key_layer = torch.cat([past_key_value[0], key_layer], dim=2) + value_layer = torch.cat([past_key_value[1], value_layer], dim=2) + else: + key_layer = self.transpose_for_scores(self.key(hidden_states)) + value_layer = self.transpose_for_scores(self.value(hidden_states)) + + query_layer = self.transpose_for_scores(mixed_query_layer) + + use_cache = past_key_value is not None + if self.is_decoder: + # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. + # Further calls to cross_attention layer can then reuse all cross-attention + # key/value_states (first "if" case) + # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of + # all previous decoder key/value_states. Further calls to uni-directional self-attention + # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) + # if encoder bi-directional self-attention `past_key_value` is always `None` + past_key_value = (key_layer, value_layer) + + # Take the dot product between "query" and "key" to get the raw attention scores. + attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) + + if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": + query_length, key_length = query_layer.shape[2], key_layer.shape[2] + if use_cache: + position_ids_l = torch.tensor(key_length - 1, dtype=torch.long, device=hidden_states.device).view( + -1, 1 + ) + else: + position_ids_l = torch.arange(query_length, dtype=torch.long, device=hidden_states.device).view(-1, 1) + position_ids_r = torch.arange(key_length, dtype=torch.long, device=hidden_states.device).view(1, -1) + distance = position_ids_l - position_ids_r + + positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1) + positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility + + if self.position_embedding_type == "relative_key": + relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) + attention_scores = attention_scores + relative_position_scores + elif self.position_embedding_type == "relative_key_query": + relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) + relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding) + attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key + + attention_scores = attention_scores / math.sqrt(self.attention_head_size) + if attention_mask is not None: + # Apply the attention mask is (precomputed for all layers in RoCBertModel forward() function) + attention_scores = attention_scores + attention_mask + + # Normalize the attention scores to probabilities. 
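The `relative_key` branch above can be reproduced in isolation: pairwise token distances index an embedding table of size `2 * max_position_embeddings - 1`, and the result is folded into the raw scores with the same einsum pattern. A small sketch with toy sizes; all tensors here are random placeholders.

```python
import torch
import torch.nn as nn

batch, heads, seq, head_dim, max_pos = 1, 2, 4, 8, 16
query_layer = torch.randn(batch, heads, seq, head_dim)
key_layer = torch.randn(batch, heads, seq, head_dim)
distance_embedding = nn.Embedding(2 * max_pos - 1, head_dim)

position_ids_l = torch.arange(seq).view(-1, 1)
position_ids_r = torch.arange(seq).view(1, -1)
distance = position_ids_l - position_ids_r                   # values in [-(seq-1), seq-1]
positional_embedding = distance_embedding(distance + max_pos - 1)

scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
scores = scores + torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
scores = scores / (head_dim ** 0.5)
print(scores.shape)                                          # torch.Size([1, 2, 4, 4])
```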
+ attention_probs = nn.functional.softmax(attention_scores, dim=-1) + + # This is actually dropping out entire tokens to attend to, which might + # seem a bit unusual, but is taken from the original Transformer paper. + attention_probs = self.dropout(attention_probs) + + # Mask heads if we want to + if head_mask is not None: + attention_probs = attention_probs * head_mask + + context_layer = torch.matmul(attention_probs, value_layer) + + context_layer = context_layer.permute(0, 2, 1, 3).contiguous() + new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) + context_layer = context_layer.view(new_context_layer_shape) + + outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) + + if self.is_decoder: + outputs = outputs + (past_key_value,) + return outputs + + +# Copied from transformers.models.bert.modeling_bert.BertSelfOutput with Bert->RoCBert +class RoCBertSelfOutput(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: + hidden_states = self.dense(hidden_states) + hidden_states = self.dropout(hidden_states) + hidden_states = self.LayerNorm(hidden_states + input_tensor) + return hidden_states + + +# Copied from transformers.models.bert.modeling_bert.BertAttention with Bert->RoCBert +class RoCBertAttention(nn.Module): + def __init__(self, config, position_embedding_type=None): + super().__init__() + self.self = RoCBertSelfAttention(config, position_embedding_type=position_embedding_type) + self.output = RoCBertSelfOutput(config) + self.pruned_heads = set() + + def prune_heads(self, heads): + if len(heads) == 0: + return + heads, index = find_pruneable_heads_and_indices( + heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads + ) + + # Prune linear layers + self.self.query = prune_linear_layer(self.self.query, index) + self.self.key = prune_linear_layer(self.self.key, index) + self.self.value = prune_linear_layer(self.self.value, index) + self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) + + # Update hyper params and store pruned heads + self.self.num_attention_heads = self.self.num_attention_heads - len(heads) + self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads + self.pruned_heads = self.pruned_heads.union(heads) + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.FloatTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + encoder_attention_mask: Optional[torch.FloatTensor] = None, + past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, + output_attentions: Optional[bool] = False, + ) -> Tuple[torch.Tensor]: + self_outputs = self.self( + hidden_states, + attention_mask, + head_mask, + encoder_hidden_states, + encoder_attention_mask, + past_key_value, + output_attentions, + ) + attention_output = self.output(self_outputs[0], hidden_states) + outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them + return outputs + + +# Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->RoCBert +class RoCBertIntermediate(nn.Module): + def __init__(self, config): + super().__init__() + 
self.dense = nn.Linear(config.hidden_size, config.intermediate_size) + if isinstance(config.hidden_act, str): + self.intermediate_act_fn = ACT2FN[config.hidden_act] + else: + self.intermediate_act_fn = config.hidden_act + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + hidden_states = self.dense(hidden_states) + hidden_states = self.intermediate_act_fn(hidden_states) + return hidden_states + + +# Copied from transformers.models.bert.modeling_bert.BertOutput with Bert->RoCBert +class RoCBertOutput(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.intermediate_size, config.hidden_size) + self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: + hidden_states = self.dense(hidden_states) + hidden_states = self.dropout(hidden_states) + hidden_states = self.LayerNorm(hidden_states + input_tensor) + return hidden_states + + +# Copied from transformers.models.bert.modeling_bert.BertLayer with Bert->RoCBert +class RoCBertLayer(nn.Module): + def __init__(self, config): + super().__init__() + self.chunk_size_feed_forward = config.chunk_size_feed_forward + self.seq_len_dim = 1 + self.attention = RoCBertAttention(config) + self.is_decoder = config.is_decoder + self.add_cross_attention = config.add_cross_attention + if self.add_cross_attention: + if not self.is_decoder: + raise ValueError(f"{self} should be used as a decoder model if cross attention is added") + self.crossattention = RoCBertAttention(config, position_embedding_type="absolute") + self.intermediate = RoCBertIntermediate(config) + self.output = RoCBertOutput(config) + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.FloatTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + encoder_attention_mask: Optional[torch.FloatTensor] = None, + past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, + output_attentions: Optional[bool] = False, + ) -> Tuple[torch.Tensor]: + # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 + self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None + self_attention_outputs = self.attention( + hidden_states, + attention_mask, + head_mask, + output_attentions=output_attentions, + past_key_value=self_attn_past_key_value, + ) + attention_output = self_attention_outputs[0] + + # if decoder, the last output is tuple of self-attn cache + if self.is_decoder: + outputs = self_attention_outputs[1:-1] + present_key_value = self_attention_outputs[-1] + else: + outputs = self_attention_outputs[1:] # add self attentions if we output attention weights + + cross_attn_present_key_value = None + if self.is_decoder and encoder_hidden_states is not None: + if not hasattr(self, "crossattention"): + raise ValueError( + f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers" + " by setting `config.add_cross_attention=True`" + ) + + # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple + cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None + cross_attention_outputs = self.crossattention( + attention_output, + attention_mask, + head_mask, + encoder_hidden_states, + encoder_attention_mask, + cross_attn_past_key_value, + 
output_attentions, + ) + attention_output = cross_attention_outputs[0] + outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights + + # add cross-attn cache to positions 3,4 of present_key_value tuple + cross_attn_present_key_value = cross_attention_outputs[-1] + present_key_value = present_key_value + cross_attn_present_key_value + + layer_output = apply_chunking_to_forward( + self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output + ) + outputs = (layer_output,) + outputs + + # if decoder, return the attn key/values as the last output + if self.is_decoder: + outputs = outputs + (present_key_value,) + + return outputs + + def feed_forward_chunk(self, attention_output): + intermediate_output = self.intermediate(attention_output) + layer_output = self.output(intermediate_output, attention_output) + return layer_output + + +# Copied from transformers.models.bert.modeling_bert.BertEncoder with Bert->RoCBert +class RoCBertEncoder(nn.Module): + def __init__(self, config): + super().__init__() + self.config = config + self.layer = nn.ModuleList([RoCBertLayer(config) for _ in range(config.num_hidden_layers)]) + self.gradient_checkpointing = False + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.FloatTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + encoder_attention_mask: Optional[torch.FloatTensor] = None, + past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = False, + output_hidden_states: Optional[bool] = False, + return_dict: Optional[bool] = True, + ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]: + all_hidden_states = () if output_hidden_states else None + all_self_attentions = () if output_attentions else None + all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None + + if self.gradient_checkpointing and self.training: + if use_cache: + logger.warning_once( + "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." 
+ ) + use_cache = False + + next_decoder_cache = () if use_cache else None + for i, layer_module in enumerate(self.layer): + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + layer_head_mask = head_mask[i] if head_mask is not None else None + past_key_value = past_key_values[i] if past_key_values is not None else None + + if self.gradient_checkpointing and self.training: + layer_outputs = self._gradient_checkpointing_func( + layer_module.__call__, + hidden_states, + attention_mask, + layer_head_mask, + encoder_hidden_states, + encoder_attention_mask, + past_key_value, + output_attentions, + ) + else: + layer_outputs = layer_module( + hidden_states, + attention_mask, + layer_head_mask, + encoder_hidden_states, + encoder_attention_mask, + past_key_value, + output_attentions, + ) + + hidden_states = layer_outputs[0] + if use_cache: + next_decoder_cache += (layer_outputs[-1],) + if output_attentions: + all_self_attentions = all_self_attentions + (layer_outputs[1],) + if self.config.add_cross_attention: + all_cross_attentions = all_cross_attentions + (layer_outputs[2],) + + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + if not return_dict: + return tuple( + v + for v in [ + hidden_states, + next_decoder_cache, + all_hidden_states, + all_self_attentions, + all_cross_attentions, + ] + if v is not None + ) + return BaseModelOutputWithPastAndCrossAttentions( + last_hidden_state=hidden_states, + past_key_values=next_decoder_cache, + hidden_states=all_hidden_states, + attentions=all_self_attentions, + cross_attentions=all_cross_attentions, + ) + + +# Copied from transformers.models.bert.modeling_bert.BertPooler with Bert->RoCBert +class RoCBertPooler(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + self.activation = nn.Tanh() + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + # We "pool" the model by simply taking the hidden state corresponding + # to the first token. + first_token_tensor = hidden_states[:, 0] + pooled_output = self.dense(first_token_tensor) + pooled_output = self.activation(pooled_output) + return pooled_output + + +# Copied from transformers.models.bert.modeling_bert.BertPredictionHeadTransform with Bert->RoCBert +class RoCBertPredictionHeadTransform(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + if isinstance(config.hidden_act, str): + self.transform_act_fn = ACT2FN[config.hidden_act] + else: + self.transform_act_fn = config.hidden_act + self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + hidden_states = self.dense(hidden_states) + hidden_states = self.transform_act_fn(hidden_states) + hidden_states = self.LayerNorm(hidden_states) + return hidden_states + + +# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->RoCBert +class RoCBertLMPredictionHead(nn.Module): + def __init__(self, config): + super().__init__() + self.transform = RoCBertPredictionHeadTransform(config) + + # The output weights are the same as the input embeddings, but there is + # an output-only bias for each token. 
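The pooling step that feeds the classification heads (and the contrastive loss further down) is just a projection of the first token's hidden state followed by a tanh; a toy sketch:

```python
import torch
import torch.nn as nn

hidden_size = 16
hidden_states = torch.randn(2, 7, hidden_size)   # (batch, seq_len, hidden)

dense = nn.Linear(hidden_size, hidden_size)
first_token = hidden_states[:, 0]                # hidden state of the [CLS] position
pooled_output = torch.tanh(dense(first_token))   # (batch, hidden)
print(pooled_output.shape)
```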
+ self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False) + + self.bias = nn.Parameter(torch.zeros(config.vocab_size)) + + # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings` + self.decoder.bias = self.bias + + def forward(self, hidden_states): + hidden_states = self.transform(hidden_states) + hidden_states = self.decoder(hidden_states) + return hidden_states + + +# Copied from transformers.models.bert.modeling_bert.BertOnlyMLMHead with Bert->RoCBert +class RoCBertOnlyMLMHead(nn.Module): + def __init__(self, config): + super().__init__() + self.predictions = RoCBertLMPredictionHead(config) + + def forward(self, sequence_output: torch.Tensor) -> torch.Tensor: + prediction_scores = self.predictions(sequence_output) + return prediction_scores + + +# Copied from transformers.models.bert.modeling_bert.BertPreTrainedModel with Bert->RoCBert,bert->roc_bert +class RoCBertPreTrainedModel(PreTrainedModel): + """ + An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained + models. + """ + + config_class = RoCBertConfig + load_tf_weights = load_tf_weights_in_roc_bert + base_model_prefix = "roc_bert" + supports_gradient_checkpointing = True + + def _init_weights(self, module): + """Initialize the weights""" + if isinstance(module, nn.Linear): + # Slightly different from the TF version which uses truncated_normal for initialization + # cf https://github.com/pytorch/pytorch/pull/5617 + module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) + if module.bias is not None: + module.bias.data.zero_() + elif isinstance(module, nn.Embedding): + module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) + if module.padding_idx is not None: + module.weight.data[module.padding_idx].zero_() + elif isinstance(module, nn.LayerNorm): + module.bias.data.zero_() + module.weight.data.fill_(1.0) + + +ROC_BERT_START_DOCSTRING = r""" + This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use + it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and + behavior. + + Parameters: + config ([`RoCBertConfig`]): Model configuration class with all the parameters of the model. + Initializing with a config file does not load the weights associated with the model, only the + configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. +""" + +ROC_BERT_INPUTS_DOCSTRING = r""" + Args: + input_ids (`torch.LongTensor` of shape `({0})`): + Indices of input sequence tokens in the vocabulary. + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input-ids) + input_shape_ids (`torch.LongTensor` of shape `({0})`): + Indices of input sequence tokens in the shape vocabulary. + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input_shape_ids) + input_pronunciation_ids (`torch.LongTensor` of shape `({0})`): + Indices of input sequence tokens in the pronunciation vocabulary. + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. 
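Assigning `self.decoder.bias = self.bias` makes the two attributes share one tensor, so resizing one resizes the other; the output projection itself is tied to the input embeddings separately, via `get_output_embeddings`/`set_output_embeddings` in the head models below. A toy illustration of both links in plain PyTorch (names and sizes are made up):

```python
import torch
import torch.nn as nn

vocab_size, hidden_size = 100, 16
word_embeddings = nn.Embedding(vocab_size, hidden_size)

decoder = nn.Linear(hidden_size, vocab_size, bias=False)
bias = nn.Parameter(torch.zeros(vocab_size))
decoder.bias = bias                      # shared tensor: resizing keeps both in sync
decoder.weight = word_embeddings.weight  # tie the output projection to the input embeddings

hidden = torch.randn(2, 5, hidden_size)
logits = decoder(hidden)                 # uses the shared weight and bias
print(logits.shape)                      # torch.Size([2, 5, 100])
```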
+ + [What are input IDs?](../glossary#input_pronunciation_ids) + attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*): + Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, + 1]`: + + - 0 corresponds to a *sentence A* token, + - 1 corresponds to a *sentence B* token. + + [What are token type IDs?](../glossary#token-type-ids) + position_ids (`torch.LongTensor` of shape `({0})`, *optional*): + Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, + config.max_position_embeddings - 1]`. + + [What are position IDs?](../glossary#position-ids) + head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): + Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + + inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*): + Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This + is useful if you want more control over how to convert *input_ids* indices into associated vectors than the + model's internal embedding lookup matrix. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. +""" + + +@add_start_docstrings( + "The bare RoCBert Model transformer outputting raw hidden-states without any specific head on top.", + ROC_BERT_START_DOCSTRING, +) +class RoCBertModel(RoCBertPreTrainedModel): + """ + + The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of + cross-attention is added between the self-attention layers, following the architecture described in [Attention is + all you need](https://arxiv.org/abs/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, + Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin. + + To behave as an decoder the model needs to be initialized with the `is_decoder` argument of the configuration set + to `True`. To be used in a Seq2Seq model, the model needs to be initialized with both `is_decoder` argument and + `add_cross_attention` set to `True`; an `encoder_hidden_states` is then expected as an input to the forward pass. 
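For readers assembling these tensors by hand, this is what the padding and segment masks described above look like for a small right-padded batch (the ids are illustrative; a tokenizer normally produces all of this):

```python
import torch

pad_id = 0
input_ids = torch.tensor([
    [101, 872, 102, 4692, 102],   # [CLS] A [SEP] B [SEP]
    [101, 872, 102,    0,   0],   # shorter pair, right-padded
])
attention_mask = (input_ids != pad_id).long()   # 1 = real token, 0 = padding
token_type_ids = torch.tensor([
    [0, 0, 0, 1, 1],                            # 0 = sentence A, 1 = sentence B
    [0, 0, 0, 0, 0],
])
print(attention_mask)
print(token_type_ids)
```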
+ """ + + # Copied from transformers.models.bert.modeling_bert.BertModel.__init__ with Bert->RoCBert + def __init__(self, config, add_pooling_layer=True): + super().__init__(config) + self.config = config + + self.embeddings = RoCBertEmbeddings(config) + self.encoder = RoCBertEncoder(config) + + self.pooler = RoCBertPooler(config) if add_pooling_layer else None + + # Initialize weights and apply final processing + self.post_init() + + # Copied from transformers.models.bert.modeling_bert.BertModel.get_input_embeddings + def get_input_embeddings(self): + return self.embeddings.word_embeddings + + # Copied from transformers.models.bert.modeling_bert.BertModel.set_input_embeddings + def set_input_embeddings(self, value): + self.embeddings.word_embeddings = value + + def get_pronunciation_embeddings(self): + return self.embeddings.pronunciation_embed + + def set_pronunciation_embeddings(self, value): + self.embeddings.pronunciation_embed = value + + def get_shape_embeddings(self): + return self.embeddings.shape_embed + + def set_shape_embeddings(self, value): + self.embeddings.shape_embed = value + + # Copied from transformers.models.bert.modeling_bert.BertModel._prune_heads + def _prune_heads(self, heads_to_prune): + """ + Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base + class PreTrainedModel + """ + for layer, heads in heads_to_prune.items(): + self.encoder.layer[layer].attention.prune_heads(heads) + + @add_start_docstrings_to_model_forward(ROC_BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) + @add_code_sample_docstrings( + checkpoint=_CHECKPOINT_FOR_DOC, + output_type=BaseModelOutputWithPoolingAndCrossAttentions, + config_class=_CONFIG_FOR_DOC, + expected_output=_EXPECTED_OUTPUT_SHAPE, + ) + def forward( + self, + input_ids: Optional[torch.Tensor] = None, + input_shape_ids: Optional[torch.Tensor] = None, + input_pronunciation_ids: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + token_type_ids: Optional[torch.Tensor] = None, + position_ids: Optional[torch.Tensor] = None, + head_mask: Optional[torch.Tensor] = None, + inputs_embeds: Optional[torch.Tensor] = None, + encoder_hidden_states: Optional[torch.Tensor] = None, + encoder_attention_mask: Optional[torch.Tensor] = None, + past_key_values: Optional[List[torch.FloatTensor]] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]: + r""" + encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if + the model is configured as a decoder. + encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in + the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): + Contains precomputed key and value hidden states of the attention blocks. 
Can be used to speed up decoding. + If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that + don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all + `decoder_input_ids` of shape `(batch_size, sequence_length)`. + use_cache (`bool`, *optional*): + If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see + `past_key_values`). + """ + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if self.config.is_decoder: + use_cache = use_cache if use_cache is not None else self.config.use_cache + else: + use_cache = False + + if input_ids is not None and inputs_embeds is not None: + raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") + elif input_ids is not None: + self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) + input_shape = input_ids.size() + elif inputs_embeds is not None: + input_shape = inputs_embeds.size()[:-1] + else: + raise ValueError("You have to specify either input_ids or inputs_embeds") + + batch_size, seq_length = input_shape + device = input_ids.device if input_ids is not None else inputs_embeds.device + + # past_key_values_length + past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 + + if attention_mask is None: + attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device) + + if token_type_ids is None: + if hasattr(self.embeddings, "token_type_ids"): + buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length] + buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length) + token_type_ids = buffered_token_type_ids_expanded + else: + token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) + + # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] + # ourselves in which case we just need to make it broadcastable to all heads. 
+ extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape) + + # If a 2D or 3D attention mask is provided for the cross-attention + # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] + if self.config.is_decoder and encoder_hidden_states is not None: + encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size() + encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) + if encoder_attention_mask is None: + encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device) + encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) + else: + encoder_extended_attention_mask = None + + # Prepare head mask if needed + # 1.0 in head_mask indicate we keep the head + # attention_probs has shape bsz x n_heads x N x N + # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] + # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] + head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) + + embedding_output = self.embeddings( + input_ids=input_ids, + input_shape_ids=input_shape_ids, + input_pronunciation_ids=input_pronunciation_ids, + position_ids=position_ids, + token_type_ids=token_type_ids, + inputs_embeds=inputs_embeds, + past_key_values_length=past_key_values_length, + ) + encoder_outputs = self.encoder( + embedding_output, + attention_mask=extended_attention_mask, + head_mask=head_mask, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_extended_attention_mask, + past_key_values=past_key_values, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + sequence_output = encoder_outputs[0] + pooled_output = self.pooler(sequence_output) if self.pooler is not None else None + + if not return_dict: + return (sequence_output, pooled_output) + encoder_outputs[1:] + + return BaseModelOutputWithPoolingAndCrossAttentions( + last_hidden_state=sequence_output, + pooler_output=pooled_output, + past_key_values=encoder_outputs.past_key_values, + hidden_states=encoder_outputs.hidden_states, + attentions=encoder_outputs.attentions, + cross_attentions=encoder_outputs.cross_attentions, + ) + + +@add_start_docstrings( + """ + RoCBert Model with contrastive loss and masked_lm_loss during the pretraining. 
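`get_extended_attention_mask` turns the 2D padding mask into an additive bias that broadcasts over heads and query positions; a rough standalone sketch of the idea (the real helper also handles 3D masks and dtype-specific minimums):

```python
import torch

attention_mask = torch.tensor([[1, 1, 1, 0, 0]])               # (batch, seq_len)

extended = attention_mask[:, None, None, :].to(torch.float32)  # (batch, 1, 1, seq_len)
extended = (1.0 - extended) * torch.finfo(torch.float32).min   # 0 -> very negative, 1 -> 0
print(extended)  # added to the raw attention scores before the softmax
```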
+ """, + ROC_BERT_START_DOCSTRING, +) +class RoCBertForPreTraining(RoCBertPreTrainedModel): + _tied_weights_keys = ["cls.predictions.decoder.weight", "cls.predictions.decoder.bias"] + + def __init__(self, config): + super().__init__(config) + + self.roc_bert = RoCBertModel(config) + self.cls = RoCBertOnlyMLMHead(config) + + # Initialize weights and apply final processing + self.post_init() + + # Copied from transformers.models.bert.modeling_bert.BertForPreTraining.get_output_embeddings + def get_output_embeddings(self): + return self.cls.predictions.decoder + + # Copied from transformers.models.bert.modeling_bert.BertForPreTraining.set_output_embeddings + def set_output_embeddings(self, new_embeddings): + self.cls.predictions.decoder = new_embeddings + + @add_start_docstrings_to_model_forward(ROC_BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) + @replace_return_docstrings(output_type=MaskedLMOutput, config_class=_CONFIG_FOR_DOC) + def forward( + self, + input_ids: Optional[torch.Tensor] = None, + input_shape_ids: Optional[torch.Tensor] = None, + input_pronunciation_ids: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + token_type_ids: Optional[torch.Tensor] = None, + attack_input_ids: Optional[torch.Tensor] = None, + attack_input_shape_ids: Optional[torch.Tensor] = None, + attack_input_pronunciation_ids: Optional[torch.Tensor] = None, + attack_attention_mask: Optional[torch.Tensor] = None, + attack_token_type_ids: Optional[torch.Tensor] = None, + position_ids: Optional[torch.Tensor] = None, + head_mask: Optional[torch.Tensor] = None, + inputs_embeds: Optional[torch.Tensor] = None, + labels_input_ids: Optional[torch.Tensor] = None, + labels_input_shape_ids: Optional[torch.Tensor] = None, + labels_input_pronunciation_ids: Optional[torch.Tensor] = None, + labels_attention_mask: Optional[torch.Tensor] = None, + labels_token_type_ids: Optional[torch.Tensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + **kwargs, + ) -> Union[Tuple[torch.Tensor], MaskedLMOutput]: + r""" + attack_input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + attack sample ids for computing the contrastive loss. Indices should be in `[-100, 0, ..., + config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), + the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` + attack_input_shape_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + attack sample shape ids for computing the contrastive loss. Indices should be in `[-100, 0, ..., + config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), + the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` + attack_input_pronunciation_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + attack sample pronunciation ids for computing the contrastive loss. Indices should be in `[-100, 0, + ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored + (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` + labels_input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + target ids for computing the contrastive loss and masked_lm_loss . 
Indices should be in `[-100, 0, ..., + config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), + the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` + labels_input_shape_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + target shape ids for computing the contrastive loss and masked_lm_loss . Indices should be in `[-100, + 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored + (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` + labels_input_pronunciation_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + target pronunciation ids for computing the contrastive loss and masked_lm_loss . Indices should be in + `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are + ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., + config.vocab_size]` + + kwargs (`Dict[str, any]`, optional, defaults to *{}*): + Used to hide legacy arguments that have been deprecated. + + Returns: + + Example: + + ```python + >>> from transformers import AutoTokenizer, RoCBertForPreTraining + >>> import torch + + >>> tokenizer = AutoTokenizer.from_pretrained("weiweishi/roc-bert-base-zh") + >>> model = RoCBertForPreTraining.from_pretrained("weiweishi/roc-bert-base-zh") + + >>> inputs = tokenizer("你好,很高兴认识你", return_tensors="pt") + >>> attack_inputs = {} + >>> for key in list(inputs.keys()): + ... attack_inputs[f"attack_{key}"] = inputs[key] + >>> label_inputs = {} + >>> for key in list(inputs.keys()): + ... label_inputs[f"labels_{key}"] = inputs[key] + + >>> inputs.update(label_inputs) + >>> inputs.update(attack_inputs) + >>> outputs = model(**inputs) + + >>> logits = outputs.logits + >>> logits.shape + torch.Size([1, 11, 21128]) + ``` + """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + outputs = self.roc_bert( + input_ids, + input_shape_ids=input_shape_ids, + input_pronunciation_ids=input_pronunciation_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + sequence_output, pooled_output = outputs[:2] + prediction_scores = self.cls(sequence_output) + + loss = None + if labels_input_ids is not None: + loss_fct = CrossEntropyLoss() # -100 index = padding token + masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels_input_ids.view(-1)) + + if attack_input_ids is not None: + batch_size, _ = labels_input_ids.shape + device = labels_input_ids.device + + target_inputs = torch.clone(labels_input_ids) + target_inputs[target_inputs == -100] = self.config.pad_token_id + + labels_output = self.roc_bert( + target_inputs, + input_shape_ids=labels_input_shape_ids, + input_pronunciation_ids=labels_input_pronunciation_ids, + attention_mask=labels_attention_mask, + token_type_ids=labels_token_type_ids, + return_dict=return_dict, + ) + attack_output = self.roc_bert( + attack_input_ids, + input_shape_ids=attack_input_shape_ids, + input_pronunciation_ids=attack_input_pronunciation_ids, + attention_mask=attack_attention_mask, + token_type_ids=attack_token_type_ids, + return_dict=return_dict, + ) + + labels_pooled_output = labels_output[1] + 
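Two details of the label handling above are easy to miss: `CrossEntropyLoss` ignores `-100` targets by default, and before the target sentence is re-encoded for the contrastive branch those `-100` entries are swapped back to the pad id. A tiny sketch of the first point:

```python
import torch
from torch.nn import CrossEntropyLoss

vocab_size = 10
prediction_scores = torch.randn(2, 5, vocab_size)
labels = torch.full((2, 5), -100)   # -100 everywhere = position does not contribute
labels[0, 2] = 7                    # only masked positions carry a real target
labels[1, 4] = 3

loss = CrossEntropyLoss()(prediction_scores.view(-1, vocab_size), labels.view(-1))
print(loss)   # averaged over the two labelled positions only
```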
attack_pooled_output = attack_output[1] + + pooled_output_norm = torch.nn.functional.normalize(pooled_output, dim=-1) + labels_pooled_output_norm = torch.nn.functional.normalize(labels_pooled_output, dim=-1) + attack_pooled_output_norm = torch.nn.functional.normalize(attack_pooled_output, dim=-1) + + sim_matrix = torch.matmul(pooled_output_norm, attack_pooled_output_norm.T) # batch_size * hidden_dim + sim_matrix_target = torch.matmul(labels_pooled_output_norm, attack_pooled_output_norm.T) + batch_labels = torch.tensor(list(range(batch_size)), device=device) + contrastive_loss = ( + loss_fct(100 * sim_matrix.view(batch_size, -1), batch_labels.view(-1)) + + loss_fct(100 * sim_matrix_target.view(batch_size, -1), batch_labels.view(-1)) + ) / 2 + + loss = contrastive_loss + masked_lm_loss + else: + loss = masked_lm_loss + + if not return_dict: + output = (prediction_scores,) + outputs[2:] + return ((loss,) + output) if loss is not None else output + + return MaskedLMOutput( + loss=loss, + logits=prediction_scores, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + +@add_start_docstrings("""RoCBert Model with a `language modeling` head on top.""", ROC_BERT_START_DOCSTRING) +class RoCBertForMaskedLM(RoCBertPreTrainedModel): + _tied_weights_keys = ["cls.predictions.decoder.weight", "cls.predictions.decoder.bias"] + + # Copied from transformers.models.bert.modeling_bert.BertForMaskedLM.__init__ with Bert->RoCBert,bert->roc_bert + def __init__(self, config): + super().__init__(config) + + if config.is_decoder: + logger.warning( + "If you want to use `RoCBertForMaskedLM` make sure `config.is_decoder=False` for " + "bi-directional self-attention." + ) + + self.roc_bert = RoCBertModel(config, add_pooling_layer=False) + self.cls = RoCBertOnlyMLMHead(config) + + # Initialize weights and apply final processing + self.post_init() + + # Copied from transformers.models.bert.modeling_bert.BertForMaskedLM.get_output_embeddings + def get_output_embeddings(self): + return self.cls.predictions.decoder + + # Copied from transformers.models.bert.modeling_bert.BertForMaskedLM.set_output_embeddings + def set_output_embeddings(self, new_embeddings): + self.cls.predictions.decoder = new_embeddings + + @add_start_docstrings_to_model_forward(ROC_BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) + def forward( + self, + input_ids: Optional[torch.Tensor] = None, + input_shape_ids: Optional[torch.Tensor] = None, + input_pronunciation_ids: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + token_type_ids: Optional[torch.Tensor] = None, + position_ids: Optional[torch.Tensor] = None, + head_mask: Optional[torch.Tensor] = None, + inputs_embeds: Optional[torch.Tensor] = None, + encoder_hidden_states: Optional[torch.Tensor] = None, + encoder_attention_mask: Optional[torch.Tensor] = None, + labels: Optional[torch.Tensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple[torch.Tensor], MaskedLMOutput]: + r""" + labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ..., + config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the + loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. 
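The contrastive term computed above is an in-batch softmax over cosine similarities: both similarity matrices are `(batch_size, batch_size)` (despite the inline `hidden_dim` comment), the factor 100 acts as an inverse temperature, and the positive for row `i` is column `i`. A standalone sketch with random vectors:

```python
import torch
import torch.nn.functional as F

batch_size, hidden = 4, 8
pooled = F.normalize(torch.randn(batch_size, hidden), dim=-1)         # original sentences
labels_pooled = F.normalize(torch.randn(batch_size, hidden), dim=-1)  # target sentences
attack = F.normalize(torch.randn(batch_size, hidden), dim=-1)         # adversarial sentences

sim_matrix = 100 * pooled @ attack.T               # (batch_size, batch_size)
sim_matrix_target = 100 * labels_pooled @ attack.T
batch_labels = torch.arange(batch_size)            # positive pair = same batch index

contrastive_loss = (F.cross_entropy(sim_matrix, batch_labels)
                    + F.cross_entropy(sim_matrix_target, batch_labels)) / 2
print(contrastive_loss)
```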
+ + Example: + ```python + >>> from transformers import AutoTokenizer, RoCBertForMaskedLM + >>> import torch + + >>> tokenizer = AutoTokenizer.from_pretrained("weiweishi/roc-bert-base-zh") + >>> model = RoCBertForMaskedLM.from_pretrained("weiweishi/roc-bert-base-zh") + + >>> inputs = tokenizer("法国是首都[MASK].", return_tensors="pt") + + >>> with torch.no_grad(): + ... logits = model(**inputs).logits + + >>> # retrieve index of {mask} + >>> mask_token_index = (inputs.input_ids == tokenizer.mask_token_id)[0].nonzero(as_tuple=True)[0] + + >>> predicted_token_id = logits[0, mask_token_index].argmax(axis=-1) + >>> tokenizer.decode(predicted_token_id) + '.' + ``` + """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + outputs = self.roc_bert( + input_ids, + input_shape_ids=input_shape_ids, + input_pronunciation_ids=input_pronunciation_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_attention_mask, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + sequence_output = outputs[0] + prediction_scores = self.cls(sequence_output) + + masked_lm_loss = None + if labels is not None: + loss_fct = CrossEntropyLoss() # -100 index = padding token + masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) + + if not return_dict: + output = (prediction_scores,) + outputs[2:] + return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output + + return MaskedLMOutput( + loss=masked_lm_loss, + logits=prediction_scores, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + def prepare_inputs_for_generation( + self, input_ids, input_shape_ids=None, input_pronunciation_ids=None, attention_mask=None, **model_kwargs + ): + input_shape = input_ids.shape + effective_batch_size = input_shape[0] + + # add a dummy token + if self.config.pad_token_id is None: + raise ValueError("The PAD token should be defined for generation") + + attention_mask = torch.cat([attention_mask, attention_mask.new_zeros((attention_mask.shape[0], 1))], dim=-1) + dummy_token = torch.full( + (effective_batch_size, 1), self.config.pad_token_id, dtype=torch.long, device=input_ids.device + ) + input_ids = torch.cat([input_ids, dummy_token], dim=1) + if input_shape_ids is not None: + input_shape_ids = torch.cat([input_shape_ids, dummy_token], dim=1) + if input_pronunciation_ids is not None: + input_pronunciation_ids = torch.cat([input_pronunciation_ids, dummy_token], dim=1) + + return { + "input_ids": input_ids, + "input_shape_ids": input_shape_ids, + "input_pronunciation_ids": input_pronunciation_ids, + "attention_mask": attention_mask, + } + + +@add_start_docstrings( + """RoCBert Model with a `language modeling` head on top for CLM fine-tuning.""", ROC_BERT_START_DOCSTRING +) +class RoCBertForCausalLM(RoCBertPreTrainedModel): + _tied_weights_keys = ["cls.predictions.decoder.weight", "cls.predictions.decoder.bias"] + + # Copied from transformers.models.bert.modeling_bert.BertLMHeadModel.__init__ with BertLMHeadModel->RoCBertForCausalLM,Bert->RoCBert,bert->roc_bert + def __init__(self, config): + super().__init__(config) + + if not config.is_decoder: + logger.warning("If you want to use `RoCRoCBertForCausalLM` as a standalone, add `is_decoder=True.`") + + self.roc_bert = 
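`prepare_inputs_for_generation` above uses a small trick so that a masked-LM head can be driven by the generation utilities: it appends one pad token to every sequence (and a 0 to the attention mask) to give the model a position to predict into without attending to it. Sketched in isolation:

```python
import torch

pad_token_id = 0
input_ids = torch.tensor([[101, 2769, 102]])
attention_mask = torch.ones_like(input_ids)

dummy = torch.full((input_ids.shape[0], 1), pad_token_id, dtype=torch.long)
input_ids = torch.cat([input_ids, dummy], dim=1)
attention_mask = torch.cat(
    [attention_mask, attention_mask.new_zeros((attention_mask.shape[0], 1))], dim=-1
)
print(input_ids)        # [[101, 2769, 102, 0]]
print(attention_mask)   # [[1, 1, 1, 0]]
```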
RoCBertModel(config, add_pooling_layer=False) + self.cls = RoCBertOnlyMLMHead(config) + + # Initialize weights and apply final processing + self.post_init() + + # Copied from transformers.models.bert.modeling_bert.BertLMHeadModel.get_output_embeddings + def get_output_embeddings(self): + return self.cls.predictions.decoder + + # Copied from transformers.models.bert.modeling_bert.BertLMHeadModel.set_output_embeddings + def set_output_embeddings(self, new_embeddings): + self.cls.predictions.decoder = new_embeddings + + @add_start_docstrings_to_model_forward(ROC_BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) + @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC) + def forward( + self, + input_ids: Optional[torch.Tensor] = None, + input_shape_ids: Optional[torch.Tensor] = None, + input_pronunciation_ids: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + token_type_ids: Optional[torch.Tensor] = None, + position_ids: Optional[torch.Tensor] = None, + inputs_embeds: Optional[torch.Tensor] = None, + encoder_hidden_states: Optional[torch.Tensor] = None, + encoder_attention_mask: Optional[torch.Tensor] = None, + head_mask: Optional[torch.Tensor] = None, + past_key_values: Optional[List[torch.Tensor]] = None, + labels: Optional[torch.Tensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple[torch.Tensor], CausalLMOutputWithCrossAttentions]: + r""" + encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if + the model is configured as a decoder. + encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in + the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): + Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape + `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape + `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. The two additional tensors are + only required when the model is used as a decoder in a Sequence to Sequence model. + + Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention + blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. + + If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that + don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all + `decoder_input_ids` of shape `(batch_size, sequence_length)`. + labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Labels for computing the left-to-right language modeling loss (next word prediction). 
Indices should be in + `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are + ignored (masked), the loss is only computed for the tokens with labels n `[0, ..., config.vocab_size]`. + use_cache (`bool`, *optional*): + If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see + `past_key_values`). + + Returns: + + Example: + + ```python + >>> from transformers import AutoTokenizer, RoCBertForCausalLM, RoCBertConfig + >>> import torch + + >>> tokenizer = AutoTokenizer.from_pretrained("weiweishi/roc-bert-base-zh") + >>> config = RoCBertConfig.from_pretrained("weiweishi/roc-bert-base-zh") + >>> config.is_decoder = True + >>> model = RoCBertForCausalLM.from_pretrained("weiweishi/roc-bert-base-zh", config=config) + + >>> inputs = tokenizer("你好,很高兴认识你", return_tensors="pt") + >>> outputs = model(**inputs) + + >>> prediction_logits = outputs.logits + ``` + """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + outputs = self.roc_bert( + input_ids, + input_shape_ids=input_shape_ids, + input_pronunciation_ids=input_pronunciation_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_attention_mask, + past_key_values=past_key_values, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + sequence_output = outputs[0] + prediction_scores = self.cls(sequence_output) + + lm_loss = None + if labels is not None: + # we are doing next-token prediction; shift prediction scores and input ids by one + shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous() + labels = labels[:, 1:].contiguous() + loss_fct = CrossEntropyLoss() + lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) + + if not return_dict: + output = (prediction_scores,) + outputs[2:] + return ((lm_loss,) + output) if lm_loss is not None else output + + return CausalLMOutputWithCrossAttentions( + loss=lm_loss, + logits=prediction_scores, + past_key_values=outputs.past_key_values, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + cross_attentions=outputs.cross_attentions, + ) + + def prepare_inputs_for_generation( + self, + input_ids, + input_shape_ids=None, + input_pronunciation_ids=None, + past_key_values=None, + attention_mask=None, + **model_kwargs, + ): + input_shape = input_ids.shape + + # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly + if attention_mask is None: + attention_mask = input_ids.new_ones(input_shape) + + # cut decoder_input_ids if past_key_values is used + if past_key_values is not None: + past_length = past_key_values[0][0].shape[2] + + # Some generation methods already pass only the last input ID + if input_ids.shape[1] > past_length: + remove_prefix_length = past_length + else: + # Default to old behavior: keep only final ID + remove_prefix_length = input_ids.shape[1] - 1 + + input_ids = input_ids[:, remove_prefix_length:] + if input_shape_ids is not None: + input_shape_ids = input_shape_ids[:, -1:] + if input_pronunciation_ids is not None: + input_pronunciation_ids = input_pronunciation_ids[:, -1:] + + return { + "input_ids": input_ids, + "input_shape_ids": input_shape_ids, + 
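The causal-LM loss above implements next-token prediction by shifting: the score at position `t` is compared against the input token at position `t + 1`, so the last position and the first label are dropped. In isolation:

```python
import torch
from torch.nn import CrossEntropyLoss

vocab_size = 10
prediction_scores = torch.randn(1, 6, vocab_size)   # scores for positions 0..5
labels = torch.randint(0, vocab_size, (1, 6))       # usually the input ids themselves

shifted_scores = prediction_scores[:, :-1, :].contiguous()  # positions 0..4
shifted_labels = labels[:, 1:].contiguous()                 # tokens    1..5
lm_loss = CrossEntropyLoss()(shifted_scores.view(-1, vocab_size), shifted_labels.view(-1))
print(lm_loss)
```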
"input_pronunciation_ids": input_pronunciation_ids, + "attention_mask": attention_mask, + "past_key_values": past_key_values, + } + + # Copied from transformers.models.bert.modeling_bert.BertLMHeadModel._reorder_cache + def _reorder_cache(self, past_key_values, beam_idx): + reordered_past = () + for layer_past in past_key_values: + reordered_past += ( + tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past), + ) + return reordered_past + + +@add_start_docstrings( + """RoCBert Model transformer with a sequence classification/regression head on top (a linear layer on top of + the pooled output) e.g. for GLUE tasks.""", + ROC_BERT_START_DOCSTRING, +) +class RoCBertForSequenceClassification(RoCBertPreTrainedModel): + # Copied from transformers.models.bert.modeling_bert.BertForSequenceClassification.__init__ with Bert->RoCBert,bert->roc_bert + def __init__(self, config): + super().__init__(config) + self.num_labels = config.num_labels + self.config = config + + self.roc_bert = RoCBertModel(config) + classifier_dropout = ( + config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob + ) + self.dropout = nn.Dropout(classifier_dropout) + self.classifier = nn.Linear(config.hidden_size, config.num_labels) + + # Initialize weights and apply final processing + self.post_init() + + @add_start_docstrings_to_model_forward(ROC_BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) + @add_code_sample_docstrings( + checkpoint=_CHECKPOINT_FOR_SEQUENCE_CLASSIFICATION, + output_type=SequenceClassifierOutput, + config_class=_CONFIG_FOR_DOC, + expected_output=_SEQ_CLASS_EXPECTED_OUTPUT, + expected_loss=_SEQ_CLASS_EXPECTED_LOSS, + ) + def forward( + self, + input_ids: Optional[torch.Tensor] = None, + input_shape_ids: Optional[torch.Tensor] = None, + input_pronunciation_ids: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + token_type_ids: Optional[torch.Tensor] = None, + position_ids: Optional[torch.Tensor] = None, + head_mask: Optional[torch.Tensor] = None, + inputs_embeds: Optional[torch.Tensor] = None, + labels: Optional[torch.Tensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple[torch.Tensor], SequenceClassifierOutput]: + r""" + labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., + config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If + `config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
+ """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + outputs = self.roc_bert( + input_ids, + input_shape_ids=input_shape_ids, + input_pronunciation_ids=input_pronunciation_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + pooled_output = outputs[1] + + pooled_output = self.dropout(pooled_output) + logits = self.classifier(pooled_output) + + loss = None + if labels is not None: + if self.config.problem_type is None: + if self.num_labels == 1: + self.config.problem_type = "regression" + elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): + self.config.problem_type = "single_label_classification" + else: + self.config.problem_type = "multi_label_classification" + + if self.config.problem_type == "regression": + loss_fct = MSELoss() + if self.num_labels == 1: + loss = loss_fct(logits.squeeze(), labels.squeeze()) + else: + loss = loss_fct(logits, labels) + elif self.config.problem_type == "single_label_classification": + loss_fct = CrossEntropyLoss() + loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) + elif self.config.problem_type == "multi_label_classification": + loss_fct = BCEWithLogitsLoss() + loss = loss_fct(logits, labels) + if not return_dict: + output = (logits,) + outputs[2:] + return ((loss,) + output) if loss is not None else output + + return SequenceClassifierOutput( + loss=loss, + logits=logits, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + +@add_start_docstrings( + """RoCBert Model with a multiple choice classification head on top (a linear layer on top of + the pooled output and a softmax) e.g. 
for RocStories/SWAG tasks.""", + ROC_BERT_START_DOCSTRING, +) +class RoCBertForMultipleChoice(RoCBertPreTrainedModel): + # Copied from transformers.models.bert.modeling_bert.BertForMultipleChoice.__init__ with Bert->RoCBert,bert->roc_bert + def __init__(self, config): + super().__init__(config) + + self.roc_bert = RoCBertModel(config) + classifier_dropout = ( + config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob + ) + self.dropout = nn.Dropout(classifier_dropout) + self.classifier = nn.Linear(config.hidden_size, 1) + + # Initialize weights and apply final processing + self.post_init() + + @add_start_docstrings_to_model_forward( + ROC_BERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length") + ) + @add_code_sample_docstrings( + checkpoint=_CHECKPOINT_FOR_DOC, + output_type=MultipleChoiceModelOutput, + config_class=_CONFIG_FOR_DOC, + ) + def forward( + self, + input_ids: Optional[torch.Tensor] = None, + input_shape_ids: Optional[torch.Tensor] = None, + input_pronunciation_ids: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + token_type_ids: Optional[torch.Tensor] = None, + position_ids: Optional[torch.Tensor] = None, + head_mask: Optional[torch.Tensor] = None, + inputs_embeds: Optional[torch.Tensor] = None, + labels: Optional[torch.Tensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple[torch.Tensor], MultipleChoiceModelOutput]: + r""" + labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., + num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. 
(See + `input_ids` above) + """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1] + + input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None + input_shape_ids = input_shape_ids.view(-1, input_shape_ids.size(-1)) if input_shape_ids is not None else None + input_pronunciation_ids = ( + input_pronunciation_ids.view(-1, input_pronunciation_ids.size(-1)) + if input_pronunciation_ids is not None + else None + ) + attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None + token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None + position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None + inputs_embeds = ( + inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1)) + if inputs_embeds is not None + else None + ) + + outputs = self.roc_bert( + input_ids, + input_shape_ids=input_shape_ids, + input_pronunciation_ids=input_pronunciation_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + pooled_output = outputs[1] + + pooled_output = self.dropout(pooled_output) + logits = self.classifier(pooled_output) + reshaped_logits = logits.view(-1, num_choices) + + loss = None + if labels is not None: + loss_fct = CrossEntropyLoss() + loss = loss_fct(reshaped_logits, labels) + + if not return_dict: + output = (reshaped_logits,) + outputs[2:] + return ((loss,) + output) if loss is not None else output + + return MultipleChoiceModelOutput( + loss=loss, + logits=reshaped_logits, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + +@add_start_docstrings( + """RoCBert Model with a token classification head on top (a linear layer on top of + the hidden-states output) e.g. 
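The reshaping above folds the choice dimension into the batch so the encoder sees ordinary `(batch * num_choices, seq_len)` inputs, then unfolds the per-choice scores back to `(batch, num_choices)` for the cross-entropy. A shape-only sketch:

```python
import torch
import torch.nn as nn

batch, num_choices, seq_len, hidden = 2, 4, 7, 8
input_ids = torch.randint(0, 100, (batch, num_choices, seq_len))

flat_input_ids = input_ids.view(-1, input_ids.size(-1))       # (batch * num_choices, seq_len)
pooled_output = torch.randn(flat_input_ids.size(0), hidden)   # stand-in for the pooled encoder output
logits = nn.Linear(hidden, 1)(pooled_output)                  # one score per (example, choice)
reshaped_logits = logits.view(-1, num_choices)                # (batch, num_choices)

labels = torch.tensor([1, 3])                                 # index of the correct choice
loss = nn.CrossEntropyLoss()(reshaped_logits, labels)
print(reshaped_logits.shape, loss)
```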
for Named-Entity-Recognition (NER) tasks.""", + ROC_BERT_START_DOCSTRING, +) +class RoCBertForTokenClassification(RoCBertPreTrainedModel): + # Copied from transformers.models.bert.modeling_bert.BertForTokenClassification.__init__ with Bert->RoCBert,bert->roc_bert + def __init__(self, config): + super().__init__(config) + self.num_labels = config.num_labels + + self.roc_bert = RoCBertModel(config, add_pooling_layer=False) + classifier_dropout = ( + config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob + ) + self.dropout = nn.Dropout(classifier_dropout) + self.classifier = nn.Linear(config.hidden_size, config.num_labels) + + # Initialize weights and apply final processing + self.post_init() + + @add_start_docstrings_to_model_forward(ROC_BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) + @add_code_sample_docstrings( + checkpoint=_CHECKPOINT_FOR_TOKEN_CLASSIFICATION, + output_type=TokenClassifierOutput, + config_class=_CONFIG_FOR_DOC, + expected_output=_TOKEN_CLASS_EXPECTED_OUTPUT, + expected_loss=_TOKEN_CLASS_EXPECTED_LOSS, + ) + def forward( + self, + input_ids: Optional[torch.Tensor] = None, + input_shape_ids: Optional[torch.Tensor] = None, + input_pronunciation_ids: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + token_type_ids: Optional[torch.Tensor] = None, + position_ids: Optional[torch.Tensor] = None, + head_mask: Optional[torch.Tensor] = None, + inputs_embeds: Optional[torch.Tensor] = None, + labels: Optional[torch.Tensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, TokenClassifierOutput]: + r""" + labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`. 
+ """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + outputs = self.roc_bert( + input_ids, + input_shape_ids=input_shape_ids, + input_pronunciation_ids=input_pronunciation_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + sequence_output = outputs[0] + + sequence_output = self.dropout(sequence_output) + logits = self.classifier(sequence_output) + + loss = None + if labels is not None: + loss_fct = CrossEntropyLoss() + loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) + + if not return_dict: + output = (logits,) + outputs[2:] + return ((loss,) + output) if loss is not None else output + + return TokenClassifierOutput( + loss=loss, + logits=logits, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + +@add_start_docstrings( + """RoCBert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear + layers on top of the hidden-states output to compute `span start logits` and `span end logits`).""", + ROC_BERT_START_DOCSTRING, +) +class RoCBertForQuestionAnswering(RoCBertPreTrainedModel): + # Copied from transformers.models.bert.modeling_bert.BertForQuestionAnswering.__init__ with Bert->RoCBert,bert->roc_bert + def __init__(self, config): + super().__init__(config) + self.num_labels = config.num_labels + + self.roc_bert = RoCBertModel(config, add_pooling_layer=False) + self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) + + # Initialize weights and apply final processing + self.post_init() + + @add_start_docstrings_to_model_forward(ROC_BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) + @add_code_sample_docstrings( + checkpoint=_CHECKPOINT_FOR_QA, + output_type=QuestionAnsweringModelOutput, + config_class=_CONFIG_FOR_DOC, + qa_target_start_index=_QA_TARGET_START_INDEX, + qa_target_end_index=_QA_TARGET_END_INDEX, + expected_output=_QA_EXPECTED_OUTPUT, + expected_loss=_QA_EXPECTED_LOSS, + ) + def forward( + self, + input_ids: Optional[torch.Tensor] = None, + input_shape_ids: Optional[torch.Tensor] = None, + input_pronunciation_ids: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + token_type_ids: Optional[torch.Tensor] = None, + position_ids: Optional[torch.Tensor] = None, + head_mask: Optional[torch.Tensor] = None, + inputs_embeds: Optional[torch.Tensor] = None, + start_positions: Optional[torch.Tensor] = None, + end_positions: Optional[torch.Tensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple[torch.Tensor], QuestionAnsweringModelOutput]: + r""" + start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Labels for position (index) of the start of the labelled span for computing the token classification loss. + Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence + are not taken into account for computing the loss. + end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Labels for position (index) of the end of the labelled span for computing the token classification loss. + Positions are clamped to the length of the sequence (`sequence_length`). 
Position outside of the sequence + are not taken into account for computing the loss. + """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + outputs = self.roc_bert( + input_ids, + input_shape_ids=input_shape_ids, + input_pronunciation_ids=input_pronunciation_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + sequence_output = outputs[0] + + logits = self.qa_outputs(sequence_output) + start_logits, end_logits = logits.split(1, dim=-1) + start_logits = start_logits.squeeze(-1) + end_logits = end_logits.squeeze(-1) + + total_loss = None + if start_positions is not None and end_positions is not None: + # If we are on multi-GPU, split add a dimension + if len(start_positions.size()) > 1: + start_positions = start_positions.squeeze(-1) + if len(end_positions.size()) > 1: + end_positions = end_positions.squeeze(-1) + # sometimes the start/end positions are outside our model inputs, we ignore these terms + ignored_index = start_logits.size(1) + start_positions = start_positions.clamp(0, ignored_index) + end_positions = end_positions.clamp(0, ignored_index) + + loss_fct = CrossEntropyLoss(ignore_index=ignored_index) + start_loss = loss_fct(start_logits, start_positions) + end_loss = loss_fct(end_logits, end_positions) + total_loss = (start_loss + end_loss) / 2 + + if not return_dict: + output = (start_logits, end_logits) + outputs[2:] + return ((total_loss,) + output) if total_loss is not None else output + + return QuestionAnsweringModelOutput( + loss=total_loss, + start_logits=start_logits, + end_logits=end_logits, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) diff --git a/venv/lib/python3.10/site-packages/transformers/models/roc_bert/tokenization_roc_bert.py b/venv/lib/python3.10/site-packages/transformers/models/roc_bert/tokenization_roc_bert.py new file mode 100644 index 0000000000000000000000000000000000000000..85e1cd1d3228afd43d7c51bf9b8392e4d399a3d9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/roc_bert/tokenization_roc_bert.py @@ -0,0 +1,1108 @@ +# coding=utf-8 +# Copyright 2022 WeChatAI and The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+"""Tokenization classes for RoCBert.""" + +import collections +import itertools +import json +import os +import unicodedata +from typing import Dict, List, Optional, Tuple, Union + +from ...tokenization_utils import PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace +from ...tokenization_utils_base import ( + ENCODE_KWARGS_DOCSTRING, + ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING, + BatchEncoding, + EncodedInput, + EncodedInputPair, + PaddingStrategy, + PreTokenizedInput, + PreTokenizedInputPair, + TensorType, + TextInput, + TextInputPair, + TruncationStrategy, +) +from ...utils import add_end_docstrings, logging + + +logger = logging.get_logger(__name__) + +VOCAB_FILES_NAMES = { + "vocab_file": "vocab.txt", + "word_shape_file": "word_shape.json", + "word_pronunciation_file": "word_pronunciation.json", +} + + +# Copied from transformers.models.bert.tokenization_bert.load_vocab +def load_vocab(vocab_file): + """Loads a vocabulary file into a dictionary.""" + vocab = collections.OrderedDict() + with open(vocab_file, "r", encoding="utf-8") as reader: + tokens = reader.readlines() + for index, token in enumerate(tokens): + token = token.rstrip("\n") + vocab[token] = index + return vocab + + +# Copied from transformers.models.bert.tokenization_bert.whitespace_tokenize +def whitespace_tokenize(text): + """Runs basic whitespace cleaning and splitting on a piece of text.""" + text = text.strip() + if not text: + return [] + tokens = text.split() + return tokens + + +class RoCBertTokenizer(PreTrainedTokenizer): + r""" + Args: + Construct a RoCBert tokenizer. Based on WordPiece. This tokenizer inherits from [`PreTrainedTokenizer`] which + contains most of the main methods. Users should refer to this superclass for more information regarding those + methods. + vocab_file (`str`): + File containing the vocabulary. + word_shape_file (`str`): + File containing the word => shape info. + word_pronunciation_file (`str`): + File containing the word => pronunciation info. + do_lower_case (`bool`, *optional*, defaults to `True`): + Whether or not to lowercase the input when tokenizing. + do_basic_tokenize (`bool`, *optional*, defaults to `True`): + Whether or not to do basic tokenization before WordPiece. + never_split (`Iterable`, *optional*): + Collection of tokens which will never be split during tokenization. Only has an effect when + `do_basic_tokenize=True` + unk_token (`str`, *optional*, defaults to `"[UNK]"`): + The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this + token instead. + sep_token (`str`, *optional*, defaults to `"[SEP]"`): + The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for + sequence classification or for a text and a question for question answering. It is also used as the last + token of a sequence built with special tokens. + pad_token (`str`, *optional*, defaults to `"[PAD]"`): + The token used for padding, for example when batching sequences of different lengths. + cls_token (`str`, *optional*, defaults to `"[CLS]"`): + The classifier token which is used when doing sequence classification (classification of the whole sequence + instead of per-token classification). It is the first token of the sequence when built with special tokens. + mask_token (`str`, *optional*, defaults to `"[MASK]"`): + The token used for masking values. This is the token used when training this model with masked language + modeling. This is the token which the model will try to predict. 
+ tokenize_chinese_chars (`bool`, *optional*, defaults to `True`): + Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see this + [issue](https://github.com/huggingface/transformers/issues/328)). + strip_accents (`bool`, *optional*): + Whether or not to strip all accents. If this option is not specified, then it will be determined by the + value for `lowercase` (as in the original BERT). + """ + + vocab_files_names = VOCAB_FILES_NAMES + + def __init__( + self, + vocab_file, + word_shape_file, + word_pronunciation_file, + do_lower_case=True, + do_basic_tokenize=True, + never_split=None, + unk_token="[UNK]", + sep_token="[SEP]", + pad_token="[PAD]", + cls_token="[CLS]", + mask_token="[MASK]", + tokenize_chinese_chars=True, + strip_accents=None, + **kwargs, + ): + for cur_file in [vocab_file, word_shape_file, word_pronunciation_file]: + if cur_file is None or not os.path.isfile(cur_file): + raise ValueError( + f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google " + "pretrained model use `tokenizer = RoCBertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" + ) + + self.vocab = load_vocab(vocab_file) + + with open(word_shape_file, "r", encoding="utf8") as in_file: + self.word_shape = json.load(in_file) + + with open(word_pronunciation_file, "r", encoding="utf8") as in_file: + self.word_pronunciation = json.load(in_file) + + self.ids_to_tokens = collections.OrderedDict([(ids, tok) for tok, ids in self.vocab.items()]) + + self.do_basic_tokenize = do_basic_tokenize + if do_basic_tokenize: + self.basic_tokenizer = RoCBertBasicTokenizer( + do_lower_case=do_lower_case, + never_split=never_split, + tokenize_chinese_chars=tokenize_chinese_chars, + strip_accents=strip_accents, + ) + self.wordpiece_tokenizer = RoCBertWordpieceTokenizer(vocab=self.vocab, unk_token=str(unk_token)) + super().__init__( + do_lower_case=do_lower_case, + do_basic_tokenize=do_basic_tokenize, + never_split=never_split, + unk_token=unk_token, + sep_token=sep_token, + pad_token=pad_token, + cls_token=cls_token, + mask_token=mask_token, + tokenize_chinese_chars=tokenize_chinese_chars, + strip_accents=strip_accents, + **kwargs, + ) + + @property + def do_lower_case(self): + return self.basic_tokenizer.do_lower_case + + @property + def vocab_size(self): + return len(self.vocab) + + # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.get_vocab + def get_vocab(self): + return dict(self.vocab, **self.added_tokens_encoder) + + # Copied from transformers.models.bert.tokenization_bert.BertTokenizer._tokenize + def _tokenize(self, text, split_special_tokens=False): + split_tokens = [] + if self.do_basic_tokenize: + for token in self.basic_tokenizer.tokenize( + text, never_split=self.all_special_tokens if not split_special_tokens else None + ): + # If the token is part of the never_split set + if token in self.basic_tokenizer.never_split: + split_tokens.append(token) + else: + split_tokens += self.wordpiece_tokenizer.tokenize(token) + else: + split_tokens = self.wordpiece_tokenizer.tokenize(text) + return split_tokens + + def _encode_plus( + self, + text: Union[TextInput, PreTokenizedInput, EncodedInput], + text_pair: Optional[Union[TextInput, PreTokenizedInput, EncodedInput]] = None, + add_special_tokens: bool = True, + padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, + truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE, + max_length: Optional[int] = None, + stride: int = 0, + is_split_into_words: 
bool = False, + pad_to_multiple_of: Optional[int] = None, + return_tensors: Optional[Union[str, TensorType]] = None, + return_token_type_ids: Optional[bool] = None, + return_attention_mask: Optional[bool] = None, + return_overflowing_tokens: bool = False, + return_special_tokens_mask: bool = False, + return_offsets_mapping: bool = False, + return_length: bool = False, + verbose: bool = True, + **kwargs, + ) -> BatchEncoding: + def get_input_ids(text): + if isinstance(text, str): + tokens = self.tokenize(text, **kwargs) + tokens_ids = self.convert_tokens_to_ids(tokens) + tokens_shape_ids = self.convert_tokens_to_shape_ids(tokens) + tokens_proun_ids = self.convert_tokens_to_pronunciation_ids(tokens) + return tokens_ids, tokens_shape_ids, tokens_proun_ids + elif isinstance(text, (list, tuple)) and len(text) > 0 and isinstance(text[0], str): + if is_split_into_words: + tokens = list( + itertools.chain(*(self.tokenize(t, is_split_into_words=True, **kwargs) for t in text)) + ) + tokens_ids = self.convert_tokens_to_ids(tokens) + tokens_shape_ids = self.convert_tokens_to_shape_ids(tokens) + tokens_proun_ids = self.convert_tokens_to_pronunciation_ids(tokens) + return tokens_ids, tokens_shape_ids, tokens_proun_ids + else: + tokens_ids = self.convert_tokens_to_ids(text) + tokens_shape_ids = self.convert_tokens_to_shape_ids(text) + tokens_proun_ids = self.convert_tokens_to_pronunciation_ids(text) + return tokens_ids, tokens_shape_ids, tokens_proun_ids + elif isinstance(text, (list, tuple)) and len(text) > 0 and isinstance(text[0], int): + return text, [0] * len(text), [0] * len(text) # shape and proun id is pad_value + else: + if is_split_into_words: + raise ValueError( + f"Input {text} is not valid. Should be a string or a list/tuple of strings when" + " `is_split_into_words=True`." + ) + else: + raise ValueError( + f"Input {text} is not valid. Should be a string, a list/tuple of strings or a list/tuple of" + " integers." + ) + + if return_offsets_mapping: + raise NotImplementedError( + "return_offset_mapping is not available when using Python tokenizers. " + "To use this feature, change your tokenizer to one deriving from " + "transformers.PreTrainedTokenizerFast. 
" + "More information on available tokenizers at " + "https://github.com/huggingface/transformers/pull/2674" + ) + + first_ids, first_shape_ids, first_proun_ids = get_input_ids(text) + if text_pair is not None: + second_ids, second_shape_ids, second_proun_ids = get_input_ids(text_pair) + else: + second_ids, second_shape_ids, second_proun_ids = None, None, None + + return self.prepare_for_model( + first_ids, + first_shape_ids, + first_proun_ids, + pair_ids=second_ids, + pair_shape_ids=second_shape_ids, + pair_pronunciation_ids=second_proun_ids, + add_special_tokens=add_special_tokens, + padding=padding_strategy.value, + truncation=truncation_strategy.value, + max_length=max_length, + stride=stride, + pad_to_multiple_of=pad_to_multiple_of, + return_tensors=return_tensors, + prepend_batch_axis=True, + return_attention_mask=return_attention_mask, + return_token_type_ids=return_token_type_ids, + return_overflowing_tokens=return_overflowing_tokens, + return_special_tokens_mask=return_special_tokens_mask, + return_length=return_length, + verbose=verbose, + ) + + @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING) + def prepare_for_model( + self, + ids: List[int], + shape_ids: List[int], + pronunciation_ids: List[int], + pair_ids: Optional[List[int]] = None, + pair_shape_ids: Optional[List[int]] = None, + pair_pronunciation_ids: Optional[List[int]] = None, + add_special_tokens: bool = True, + padding: Union[bool, str, PaddingStrategy] = False, + truncation: Union[bool, str, TruncationStrategy] = None, + max_length: Optional[int] = None, + stride: int = 0, + pad_to_multiple_of: Optional[int] = None, + return_tensors: Optional[Union[str, TensorType]] = None, + return_token_type_ids: Optional[bool] = None, + return_attention_mask: Optional[bool] = None, + return_overflowing_tokens: bool = False, + return_special_tokens_mask: bool = False, + return_offsets_mapping: bool = False, + return_length: bool = False, + verbose: bool = True, + prepend_batch_axis: bool = False, + **kwargs, + ) -> BatchEncoding: + """ + Prepares a sequence of input id, or a pair of sequences of inputs ids so that it can be used by the model. It + adds special tokens, truncates sequences if overflowing while taking into account the special tokens and + manages a moving window (with user defined stride) for overflowing tokens. Please Note, for *pair_ids* + different than `None` and *truncation_strategy = longest_first* or `True`, it is not possible to return + overflowing tokens. Such a combination of arguments will raise an error. + + Args: + ids (`List[int]`): + Tokenized input ids of the first sequence. Can be obtained from a string by chaining the `tokenize` and + `convert_tokens_to_id` methods. + shape_ids (`List[int]`): + Tokenized input ids of the first sequence. Can be obtained from a string by chaining the `tokenize` and + `convert_token_to_shape_id` methods. + pronunciation_ids (`List[int]`): + Tokenized input ids of the first sequence. Can be obtained from a string by chaining the `tokenize` and + `convert_token_to_pronunciation_id` methods. + pair_ids (`List[int]`, *optional*): + Tokenized input ids of the second sequence. Can be obtained from a string by chaining the `tokenize` + and `convert_tokens_to_id` methods. + pair_shape_ids (`List[int]`, *optional*): + Tokenized input ids of the second sequence. Can be obtained from a string by chaining the `tokenize` + and `convert_token_to_shape_id` methods. 
+ pair_pronunciation_ids (`List[int]`, *optional*): + Tokenized input ids of the second sequence. Can be obtained from a string by chaining the `tokenize` + and `convert_token_to_pronunciation_id` methods. + """ + + # Backward compatibility for 'truncation_strategy', 'pad_to_max_length' + padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies( + padding=padding, + truncation=truncation, + max_length=max_length, + pad_to_multiple_of=pad_to_multiple_of, + verbose=verbose, + **kwargs, + ) + + pair = bool(pair_ids is not None) + len_ids = len(ids) + len_pair_ids = len(pair_ids) if pair else 0 + + if return_token_type_ids and not add_special_tokens: + raise ValueError( + "Asking to return token_type_ids while setting add_special_tokens to False " + "results in an undefined behavior. Please set add_special_tokens to True or " + "set return_token_type_ids to None." + ) + + if ( + return_overflowing_tokens + and truncation_strategy == TruncationStrategy.LONGEST_FIRST + and pair_ids is not None + ): + raise ValueError( + "Not possible to return overflowing tokens for pair of sequences with the " + "`longest_first`. Please select another truncation strategy than `longest_first`, " + "for instance `only_second` or `only_first`." + ) + + # Load from model defaults + if return_token_type_ids is None: + return_token_type_ids = "token_type_ids" in self.model_input_names + if return_attention_mask is None: + return_attention_mask = "attention_mask" in self.model_input_names + + encoded_inputs = {} + + # Compute the total size of the returned encodings + total_len = len_ids + len_pair_ids + (self.num_special_tokens_to_add(pair=pair) if add_special_tokens else 0) + + # Truncation: Handle max sequence length + overflowing_tokens = [] + if truncation_strategy != TruncationStrategy.DO_NOT_TRUNCATE and max_length and total_len > max_length: + ids, pair_ids, overflowing_tokens = self.truncate_sequences( + ids, + pair_ids=pair_ids, + num_tokens_to_remove=total_len - max_length, + truncation_strategy=truncation_strategy, + stride=stride, + ) + shape_ids, pair_shape_ids, _ = self.truncate_sequences( + shape_ids, + pair_ids=pair_shape_ids, + num_tokens_to_remove=total_len - max_length, + truncation_strategy=truncation_strategy, + stride=stride, + ) + pronunciation_ids, pair_pronunciation_ids, _ = self.truncate_sequences( + pronunciation_ids, + pair_ids=pair_pronunciation_ids, + num_tokens_to_remove=total_len - max_length, + truncation_strategy=truncation_strategy, + stride=stride, + ) + + if return_overflowing_tokens: + encoded_inputs["overflowing_tokens"] = overflowing_tokens + encoded_inputs["num_truncated_tokens"] = total_len - max_length + + # Add special tokens + if add_special_tokens: + sequence = self.build_inputs_with_special_tokens(ids, pair_ids) + token_type_ids = self.create_token_type_ids_from_sequences(ids, pair_ids) + input_shape_ids = self.build_inputs_with_special_tokens( + shape_ids, pair_shape_ids, self.word_shape["[UNK]"], self.word_shape["[UNK]"] + ) + input_pronunciation_ids = self.build_inputs_with_special_tokens( + pronunciation_ids, + pair_pronunciation_ids, + self.word_pronunciation["[UNK]"], + self.word_pronunciation["[UNK]"], + ) + else: + sequence = ids + pair_ids if pair_ids else ids + token_type_ids = [0] * len(ids) + ([0] * len(pair_ids) if pair_ids else []) + input_shape_ids = shape_ids + pair_shape_ids if pair_shape_ids else shape_ids + input_pronunciation_ids = ( + pronunciation_ids + pair_pronunciation_ids if pair_pronunciation_ids else 
pronunciation_ids + ) + + # Build output dictionary + encoded_inputs["input_ids"] = sequence + encoded_inputs["input_shape_ids"] = input_shape_ids + encoded_inputs["input_pronunciation_ids"] = input_pronunciation_ids + if return_token_type_ids: + encoded_inputs["token_type_ids"] = token_type_ids + if return_special_tokens_mask: + if add_special_tokens: + encoded_inputs["special_tokens_mask"] = self.get_special_tokens_mask(ids, pair_ids) + else: + encoded_inputs["special_tokens_mask"] = [0] * len(sequence) + + # Check lengths + self._eventual_warn_about_too_long_sequence(encoded_inputs["input_ids"], max_length, verbose) + + # Padding + if padding_strategy != PaddingStrategy.DO_NOT_PAD or return_attention_mask: + encoded_inputs = self.pad( + encoded_inputs, + max_length=max_length, + padding=padding_strategy.value, + pad_to_multiple_of=pad_to_multiple_of, + return_attention_mask=return_attention_mask, + ) + + if return_length: + encoded_inputs["length"] = len(encoded_inputs["input_ids"]) + + batch_outputs = BatchEncoding( + encoded_inputs, tensor_type=return_tensors, prepend_batch_axis=prepend_batch_axis + ) + + return batch_outputs + + def _pad( + self, + encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding], + max_length: Optional[int] = None, + padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, + pad_to_multiple_of: Optional[int] = None, + return_attention_mask: Optional[bool] = None, + ) -> dict: + # Load from model defaults + if return_attention_mask is None: + return_attention_mask = "attention_mask" in self.model_input_names + + required_input = encoded_inputs[self.model_input_names[0]] + + if padding_strategy == PaddingStrategy.LONGEST: + max_length = len(required_input) + + if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0): + max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of + + needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) != max_length + + # Initialize attention mask if not present. 
+ if return_attention_mask and "attention_mask" not in encoded_inputs: + encoded_inputs["attention_mask"] = [1] * len(required_input) + + if needs_to_be_padded: + difference = max_length - len(required_input) + + if self.padding_side == "right": + if return_attention_mask: + encoded_inputs["attention_mask"] = encoded_inputs["attention_mask"] + [0] * difference + if "token_type_ids" in encoded_inputs: + encoded_inputs["token_type_ids"] = ( + encoded_inputs["token_type_ids"] + [self.pad_token_type_id] * difference + ) + if "special_tokens_mask" in encoded_inputs: + encoded_inputs["special_tokens_mask"] = encoded_inputs["special_tokens_mask"] + [1] * difference + for key in ["input_shape_ids", "input_pronunciation_ids"]: + if key in encoded_inputs: + encoded_inputs[key] = encoded_inputs[key] + [self.pad_token_id] * difference + encoded_inputs[self.model_input_names[0]] = required_input + [self.pad_token_id] * difference + elif self.padding_side == "left": + if return_attention_mask: + encoded_inputs["attention_mask"] = [0] * difference + encoded_inputs["attention_mask"] + if "token_type_ids" in encoded_inputs: + encoded_inputs["token_type_ids"] = [self.pad_token_type_id] * difference + encoded_inputs[ + "token_type_ids" + ] + if "special_tokens_mask" in encoded_inputs: + encoded_inputs["special_tokens_mask"] = [1] * difference + encoded_inputs["special_tokens_mask"] + for key in ["input_shape_ids", "input_pronunciation_ids"]: + if key in encoded_inputs: + encoded_inputs[key] = [self.pad_token_id] * difference + encoded_inputs[key] + encoded_inputs[self.model_input_names[0]] = [self.pad_token_id] * difference + required_input + else: + raise ValueError("Invalid padding strategy:" + str(self.padding_side)) + + return encoded_inputs + + def _batch_encode_plus( + self, + batch_text_or_text_pairs: Union[ + List[TextInput], + List[TextInputPair], + List[PreTokenizedInput], + List[PreTokenizedInputPair], + List[EncodedInput], + List[EncodedInputPair], + ], + add_special_tokens: bool = True, + padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, + truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE, + max_length: Optional[int] = None, + stride: int = 0, + is_split_into_words: bool = False, + pad_to_multiple_of: Optional[int] = None, + return_tensors: Optional[Union[str, TensorType]] = None, + return_token_type_ids: Optional[bool] = None, + return_attention_mask: Optional[bool] = None, + return_overflowing_tokens: bool = False, + return_special_tokens_mask: bool = False, + return_offsets_mapping: bool = False, + return_length: bool = False, + verbose: bool = True, + **kwargs, + ) -> BatchEncoding: + def get_input_ids(text): + if isinstance(text, str): + tokens = self.tokenize(text, **kwargs) + tokens_ids = self.convert_tokens_to_ids(tokens) + tokens_shape_ids = self.convert_tokens_to_shape_ids(tokens) + tokens_proun_ids = self.convert_tokens_to_pronunciation_ids(tokens) + return tokens_ids, tokens_shape_ids, tokens_proun_ids + elif isinstance(text, (list, tuple)) and len(text) > 0 and isinstance(text[0], str): + if is_split_into_words: + tokens = list( + itertools.chain(*(self.tokenize(t, is_split_into_words=True, **kwargs) for t in text)) + ) + tokens_ids = self.convert_tokens_to_ids(tokens) + tokens_shape_ids = self.convert_tokens_to_shape_ids(tokens) + tokens_proun_ids = self.convert_tokens_to_pronunciation_ids(tokens) + return tokens_ids, tokens_shape_ids, tokens_proun_ids + else: + tokens_ids = self.convert_tokens_to_ids(text) + tokens_shape_ids = 
self.convert_tokens_to_shape_ids(text) + tokens_proun_ids = self.convert_tokens_to_pronunciation_ids(text) + return tokens_ids, tokens_shape_ids, tokens_proun_ids + elif isinstance(text, (list, tuple)) and len(text) > 0 and isinstance(text[0], int): + return text, [0] * len(text), [0] * len(text) # shape and proun id is pad_value + else: + raise ValueError( + "Input is not valid. Should be a string, a list/tuple of strings or a list/tuple of integers." + ) + + if return_offsets_mapping: + raise NotImplementedError( + "return_offset_mapping is not available when using Python tokenizers. " + "To use this feature, change your tokenizer to one deriving from " + "transformers.PreTrainedTokenizerFast." + ) + + input_ids = [] + input_shape_ids = [] + input_pronunciation_ids = [] + for ids_or_pair_ids in batch_text_or_text_pairs: + if not isinstance(ids_or_pair_ids, (list, tuple)): + ids, pair_ids = ids_or_pair_ids, None + elif is_split_into_words and not isinstance(ids_or_pair_ids[0], (list, tuple)): + ids, pair_ids = ids_or_pair_ids, None + else: + ids, pair_ids = ids_or_pair_ids + + first_ids, first_shape_ids, first_proun_ids = get_input_ids(ids) + if pair_ids is not None: + second_ids, second_shape_ids, second_proun_ids = get_input_ids(pair_ids) + else: + second_ids, second_shape_ids, second_proun_ids = None, None, None + + input_ids.append((first_ids, second_ids)) + input_shape_ids.append((first_shape_ids, second_shape_ids)) + input_pronunciation_ids.append((first_proun_ids, second_proun_ids)) + + batch_outputs = self._batch_prepare_for_model( + input_ids, + batch_shape_ids_pairs=input_shape_ids, + batch_pronunciation_ids_pairs=input_pronunciation_ids, + add_special_tokens=add_special_tokens, + padding_strategy=padding_strategy, + truncation_strategy=truncation_strategy, + max_length=max_length, + stride=stride, + pad_to_multiple_of=pad_to_multiple_of, + return_attention_mask=return_attention_mask, + return_token_type_ids=return_token_type_ids, + return_overflowing_tokens=return_overflowing_tokens, + return_special_tokens_mask=return_special_tokens_mask, + return_length=return_length, + return_tensors=return_tensors, + verbose=verbose, + ) + + return BatchEncoding(batch_outputs) + + @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING) + def _batch_prepare_for_model( + self, + batch_ids_pairs: List[Union[PreTokenizedInputPair, Tuple[List[int], None]]], + batch_shape_ids_pairs: List[Union[PreTokenizedInputPair, Tuple[List[int], None]]], + batch_pronunciation_ids_pairs: List[Union[PreTokenizedInputPair, Tuple[List[int], None]]], + add_special_tokens: bool = True, + padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, + truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE, + max_length: Optional[int] = None, + stride: int = 0, + pad_to_multiple_of: Optional[int] = None, + return_tensors: Optional[str] = None, + return_token_type_ids: Optional[bool] = None, + return_attention_mask: Optional[bool] = None, + return_overflowing_tokens: bool = False, + return_special_tokens_mask: bool = False, + return_length: bool = False, + verbose: bool = True, + ) -> BatchEncoding: + """ + Prepares a sequence of input id, or a pair of sequences of inputs ids so that it can be used by the model. 
It + adds special tokens, truncates sequences if overflowing while taking into account the special tokens and + manages a moving window (with user-defined stride) for overflowing tokens. + + Args: + batch_ids_pairs: list of tokenized input ids or input ids pairs + batch_shape_ids_pairs: list of tokenized input shape ids or input shape ids pairs + batch_pronunciation_ids_pairs: list of tokenized input pronunciation ids or input pronunciation ids pairs + """ + + batch_outputs = {} + for i, (first_ids, second_ids) in enumerate(batch_ids_pairs): + first_shape_ids, second_shape_ids = batch_shape_ids_pairs[i] + first_pronunciation_ids, second_pronunciation_ids = batch_pronunciation_ids_pairs[i] + outputs = self.prepare_for_model( + first_ids, + first_shape_ids, + first_pronunciation_ids, + pair_ids=second_ids, + pair_shape_ids=second_shape_ids, + pair_pronunciation_ids=second_pronunciation_ids, + add_special_tokens=add_special_tokens, + padding=PaddingStrategy.DO_NOT_PAD.value, # we pad in batch afterward + truncation=truncation_strategy.value, + max_length=max_length, + stride=stride, + pad_to_multiple_of=None, # we pad in batch afterward + return_attention_mask=False, # we pad in batch afterward + return_token_type_ids=return_token_type_ids, + return_overflowing_tokens=return_overflowing_tokens, + return_special_tokens_mask=return_special_tokens_mask, + return_length=return_length, + return_tensors=None, # We convert the whole batch to tensors at the end + prepend_batch_axis=False, + verbose=verbose, + ) + + for key, value in outputs.items(): + if key not in batch_outputs: + batch_outputs[key] = [] + batch_outputs[key].append(value) + + batch_outputs = self.pad( + batch_outputs, + padding=padding_strategy.value, + max_length=max_length, + pad_to_multiple_of=pad_to_multiple_of, + return_attention_mask=return_attention_mask, + ) + + batch_outputs = BatchEncoding(batch_outputs, tensor_type=return_tensors) + + return batch_outputs + + # Copied from transformers.models.bert.tokenization_bert.BertTokenizer._convert_token_to_id + def _convert_token_to_id(self, token): + """Converts a token (str) in an id using the vocab.""" + return self.vocab.get(token, self.vocab.get(self.unk_token)) + + def _convert_token_to_shape_id(self, token): + """Converts a token (str) into a shape_id using the shape vocab.""" + return self.word_shape.get(token, self.word_shape.get(self.unk_token)) + + def convert_tokens_to_shape_ids(self, tokens: Union[str, List[str]]) -> Union[int, List[int]]: + if tokens is None: + return None + + ids = [] + for token in tokens: + ids.append(self._convert_token_to_shape_id(token)) + return ids + + def _convert_token_to_pronunciation_id(self, token): + """Converts a token (str) into a pronunciation_id using the pronunciation vocab.""" + return self.word_pronunciation.get(token, self.word_pronunciation.get(self.unk_token)) + + def convert_tokens_to_pronunciation_ids(self, tokens: Union[str, List[str]]) -> Union[int, List[int]]: + if tokens is None: + return None + + ids = [] + for token in tokens: + ids.append(self._convert_token_to_pronunciation_id(token)) + return ids + + # Copied from transformers.models.bert.tokenization_bert.BertTokenizer._convert_id_to_token + def _convert_id_to_token(self, index): + """Converts an index (integer) in a token (str) using the vocab.""" + return self.ids_to_tokens.get(index, self.unk_token) + + # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.convert_tokens_to_string + def convert_tokens_to_string(self, tokens): + """Converts a sequence of tokens
(string) in a single string.""" + out_string = " ".join(tokens).replace(" ##", "").strip() + return out_string + + def build_inputs_with_special_tokens( + self, + token_ids_0: List[int], + token_ids_1: Optional[List[int]] = None, + cls_token_id: int = None, + sep_token_id: int = None, + ) -> List[int]: + """ + Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and + adding special tokens. A BERT sequence has the following format: + + - single sequence: `[CLS] X [SEP]` + - pair of sequences: `[CLS] A [SEP] B [SEP]` + + Args: + token_ids_0 (`List[int]`): + List of IDs to which the special tokens will be added. + token_ids_1 (`List[int]`, *optional*): + Optional second list of IDs for sequence pairs. + + Returns: + `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. + """ + cls = [self.cls_token_id] if cls_token_id is None else [cls_token_id] + sep = [self.sep_token_id] if sep_token_id is None else [sep_token_id] + if token_ids_1 is None: + return cls + token_ids_0 + sep + return cls + token_ids_0 + sep + token_ids_1 + sep + + # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.get_special_tokens_mask + def get_special_tokens_mask( + self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False + ) -> List[int]: + """ + Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding + special tokens using the tokenizer `prepare_for_model` method. + + Args: + token_ids_0 (`List[int]`): + List of IDs. + token_ids_1 (`List[int]`, *optional*): + Optional second list of IDs for sequence pairs. + already_has_special_tokens (`bool`, *optional*, defaults to `False`): + Whether or not the token list is already formatted with special tokens for the model. + + Returns: + `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. + """ + + if already_has_special_tokens: + return super().get_special_tokens_mask( + token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True + ) + + if token_ids_1 is not None: + return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1] + return [1] + ([0] * len(token_ids_0)) + [1] + + # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.create_token_type_ids_from_sequences + def create_token_type_ids_from_sequences( + self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None + ) -> List[int]: + """ + Create a mask from the two sequences passed to be used in a sequence-pair classification task. A BERT sequence + pair mask has the following format: + + ``` + 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 + | first sequence | second sequence | + ``` + + If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s). + + Args: + token_ids_0 (`List[int]`): + List of IDs. + token_ids_1 (`List[int]`, *optional*): + Optional second list of IDs for sequence pairs. + + Returns: + `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s). 
+ """ + sep = [self.sep_token_id] + cls = [self.cls_token_id] + if token_ids_1 is None: + return len(cls + token_ids_0 + sep) * [0] + return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + + def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str, str, str]: + index = 0 + if os.path.isdir(save_directory): + vocab_file = os.path.join( + save_directory, + (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"], + ) + word_shape_file = os.path.join( + save_directory, + (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["word_shape_file"], + ) + word_pronunciation_file = os.path.join( + save_directory, + (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["word_pronunciation_file"], + ) + else: + raise ValueError( + f"Can't find a directory at path '{save_directory}'. To load the vocabulary from a Google " + "pretrained model use `tokenizer = RoCBertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" + ) + + with open(vocab_file, "w", encoding="utf-8") as writer: + for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]): + if index != token_index: + logger.warning( + f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive." + " Please check that the vocabulary is not corrupted!" + ) + index = token_index + writer.write(token + "\n") + index += 1 + + with open(word_shape_file, "w", encoding="utf8") as writer: + json.dump(self.word_shape, writer, ensure_ascii=False, indent=4, separators=(", ", ": ")) + + with open(word_pronunciation_file, "w", encoding="utf8") as writer: + json.dump(self.word_pronunciation, writer, ensure_ascii=False, indent=4, separators=(", ", ": ")) + + return ( + vocab_file, + word_shape_file, + word_pronunciation_file, + ) + + +# Copied from transformers.models.bert.tokenization_bert.BasicTokenizer with BasicTokenizer->RoCBertBasicTokenizer +class RoCBertBasicTokenizer(object): + """ + Constructs a RoCBertBasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.). + + Args: + do_lower_case (`bool`, *optional*, defaults to `True`): + Whether or not to lowercase the input when tokenizing. + never_split (`Iterable`, *optional*): + Collection of tokens which will never be split during tokenization. Only has an effect when + `do_basic_tokenize=True` + tokenize_chinese_chars (`bool`, *optional*, defaults to `True`): + Whether or not to tokenize Chinese characters. + + This should likely be deactivated for Japanese (see this + [issue](https://github.com/huggingface/transformers/issues/328)). + strip_accents (`bool`, *optional*): + Whether or not to strip all accents. If this option is not specified, then it will be determined by the + value for `lowercase` (as in the original BERT). + do_split_on_punc (`bool`, *optional*, defaults to `True`): + In some instances we want to skip the basic punctuation splitting so that later tokenization can capture + the full context of the words, such as contractions. 
+ """ + + def __init__( + self, + do_lower_case=True, + never_split=None, + tokenize_chinese_chars=True, + strip_accents=None, + do_split_on_punc=True, + ): + if never_split is None: + never_split = [] + self.do_lower_case = do_lower_case + self.never_split = set(never_split) + self.tokenize_chinese_chars = tokenize_chinese_chars + self.strip_accents = strip_accents + self.do_split_on_punc = do_split_on_punc + + def tokenize(self, text, never_split=None): + """ + Basic Tokenization of a piece of text. For sub-word tokenization, see WordPieceTokenizer. + + Args: + never_split (`List[str]`, *optional*) + Kept for backward compatibility purposes. Now implemented directly at the base class level (see + [`PreTrainedTokenizer.tokenize`]) List of token not to split. + """ + # union() returns a new set by concatenating the two sets. + never_split = self.never_split.union(set(never_split)) if never_split else self.never_split + text = self._clean_text(text) + + # This was added on November 1st, 2018 for the multilingual and Chinese + # models. This is also applied to the English models now, but it doesn't + # matter since the English models were not trained on any Chinese data + # and generally don't have any Chinese data in them (there are Chinese + # characters in the vocabulary because Wikipedia does have some Chinese + # words in the English Wikipedia.). + if self.tokenize_chinese_chars: + text = self._tokenize_chinese_chars(text) + # prevents treating the same character with different unicode codepoints as different characters + unicode_normalized_text = unicodedata.normalize("NFC", text) + orig_tokens = whitespace_tokenize(unicode_normalized_text) + split_tokens = [] + for token in orig_tokens: + if token not in never_split: + if self.do_lower_case: + token = token.lower() + if self.strip_accents is not False: + token = self._run_strip_accents(token) + elif self.strip_accents: + token = self._run_strip_accents(token) + split_tokens.extend(self._run_split_on_punc(token, never_split)) + + output_tokens = whitespace_tokenize(" ".join(split_tokens)) + return output_tokens + + def _run_strip_accents(self, text): + """Strips accents from a piece of text.""" + text = unicodedata.normalize("NFD", text) + output = [] + for char in text: + cat = unicodedata.category(char) + if cat == "Mn": + continue + output.append(char) + return "".join(output) + + def _run_split_on_punc(self, text, never_split=None): + """Splits punctuation on a piece of text.""" + if not self.do_split_on_punc or (never_split is not None and text in never_split): + return [text] + chars = list(text) + i = 0 + start_new_word = True + output = [] + while i < len(chars): + char = chars[i] + if _is_punctuation(char): + output.append([char]) + start_new_word = True + else: + if start_new_word: + output.append([]) + start_new_word = False + output[-1].append(char) + i += 1 + + return ["".join(x) for x in output] + + def _tokenize_chinese_chars(self, text): + """Adds whitespace around any CJK character.""" + output = [] + for char in text: + cp = ord(char) + if self._is_chinese_char(cp): + output.append(" ") + output.append(char) + output.append(" ") + else: + output.append(char) + return "".join(output) + + def _is_chinese_char(self, cp): + """Checks whether CP is the codepoint of a CJK character.""" + # This defines a "chinese character" as anything in the CJK Unicode block: + # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) + # + # Note that the CJK Unicode block is NOT all Japanese and Korean characters, + # 
despite its name. The modern Korean Hangul alphabet is a different block, + as is Japanese Hiragana and Katakana. Those alphabets are used to write + space-separated words, so they are not treated specially and handled + like all of the other languages. + if ( + (cp >= 0x4E00 and cp <= 0x9FFF) + or (cp >= 0x3400 and cp <= 0x4DBF) # + or (cp >= 0x20000 and cp <= 0x2A6DF) # + or (cp >= 0x2A700 and cp <= 0x2B73F) # + or (cp >= 0x2B740 and cp <= 0x2B81F) # + or (cp >= 0x2B820 and cp <= 0x2CEAF) # + or (cp >= 0xF900 and cp <= 0xFAFF) + or (cp >= 0x2F800 and cp <= 0x2FA1F) # + ): # + return True + + return False + + def _clean_text(self, text): + """Performs invalid character removal and whitespace cleanup on text.""" + output = [] + for char in text: + cp = ord(char) + if cp == 0 or cp == 0xFFFD or _is_control(char): + continue + if _is_whitespace(char): + output.append(" ") + else: + output.append(char) + return "".join(output) + + +# Copied from transformers.models.bert.tokenization_bert.WordpieceTokenizer with WordpieceTokenizer->RoCBertWordpieceTokenizer +class RoCBertWordpieceTokenizer(object): + """Runs WordPiece tokenization.""" + + def __init__(self, vocab, unk_token, max_input_chars_per_word=100): + self.vocab = vocab + self.unk_token = unk_token + self.max_input_chars_per_word = max_input_chars_per_word + + def tokenize(self, text): + """ + Tokenizes a piece of text into its word pieces. This uses a greedy longest-match-first algorithm to perform + tokenization using the given vocabulary. + + For example, `input = "unaffable"` will return as output `["un", "##aff", "##able"]`. + + Args: + text: A single token or whitespace-separated tokens. This should have + already been passed through *BasicTokenizer*. + + Returns: + A list of wordpiece tokens. + """ + + output_tokens = [] + for token in whitespace_tokenize(text): + chars = list(token) + if len(chars) > self.max_input_chars_per_word: + output_tokens.append(self.unk_token) + continue + + is_bad = False + start = 0 + sub_tokens = [] + while start < len(chars): + end = len(chars) + cur_substr = None + while start < end: + substr = "".join(chars[start:end]) + if start > 0: + substr = "##" + substr + if substr in self.vocab: + cur_substr = substr + break + end -= 1 + if cur_substr is None: + is_bad = True + break + sub_tokens.append(cur_substr) + start = end + + if is_bad: + output_tokens.append(self.unk_token) + else: + output_tokens.extend(sub_tokens) + return output_tokens
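To close, a minimal usage sketch of the tokenizer defined in the file above, highlighting that it returns the extra `input_shape_ids` and `input_pronunciation_ids` consumed by RoCBert models alongside the usual `input_ids`. The checkpoint name `weiweishi/roc-bert-base-zh` and the sample sentence are illustrative assumptions, not part of this change.

```python
# Illustrative sketch; checkpoint name and sample text are assumptions, not part of this diff.
from transformers import RoCBertTokenizer

tokenizer = RoCBertTokenizer.from_pretrained("weiweishi/roc-bert-base-zh")

# Besides input_ids, RoCBertTokenizer emits shape and pronunciation ids for each token,
# which RoCBert models accept as `input_shape_ids` and `input_pronunciation_ids`.
encoded = tokenizer("你好,世界", return_tensors="pt")
print(sorted(encoded.keys()))
# expected: attention_mask, input_ids, input_pronunciation_ids, input_shape_ids, token_type_ids

# The per-token converters defined above can also be called directly.
tokens = tokenizer.tokenize("你好")
print(tokens)
print(tokenizer.convert_tokens_to_shape_ids(tokens))
print(tokenizer.convert_tokens_to_pronunciation_ids(tokens))
```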