applied-ai-018 committed on
Commit a7f6a73 · verified · 1 Parent(s): 8e4eb50

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See raw diff
Files changed (50)
  1. ckpts/universal/global_step20/zero/3.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt +3 -0
  2. ckpts/universal/global_step20/zero/3.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt +3 -0
  3. ckpts/universal/global_step20/zero/5.post_attention_layernorm.weight/exp_avg.pt +3 -0
  4. ckpts/universal/global_step20/zero/5.post_attention_layernorm.weight/exp_avg_sq.pt +3 -0
  5. ckpts/universal/global_step20/zero/5.post_attention_layernorm.weight/fp32.pt +3 -0
  6. ckpts/universal/global_step20/zero/9.attention.query_key_value.weight/exp_avg.pt +3 -0
  7. lm-evaluation-harness/wandb/run-20240522_185944-8sj20j0r/files/requirements.txt +155 -0
  8. lm-evaluation-harness/wandb/run-20240522_185944-8sj20j0r/files/wandb-summary.json +1 -0
  9. lm-evaluation-harness/wandb/run-20240522_185944-8sj20j0r/logs/debug-internal.log +182 -0
  10. lm-evaluation-harness/wandb/run-20240522_185944-8sj20j0r/logs/debug.log +29 -0
  11. lm-evaluation-harness/wandb/run-20240523_125300-yqqf3gci/files/config.yaml +43 -0
  12. lm-evaluation-harness/wandb/run-20240523_125300-yqqf3gci/files/output.log +34 -0
  13. lm-evaluation-harness/wandb/run-20240523_125300-yqqf3gci/files/requirements.txt +155 -0
  14. lm-evaluation-harness/wandb/run-20240523_125300-yqqf3gci/files/wandb-metadata.json +850 -0
  15. lm-evaluation-harness/wandb/run-20240523_125300-yqqf3gci/files/wandb-summary.json +1 -0
  16. lm-evaluation-harness/wandb/run-20240523_125300-yqqf3gci/logs/debug-internal.log +183 -0
  17. lm-evaluation-harness/wandb/run-20240523_125300-yqqf3gci/logs/debug.log +29 -0
  18. lm-evaluation-harness/wandb/run-20240523_125300-yqqf3gci/run-yqqf3gci.wandb +0 -0
  19. lm-evaluation-harness/wandb/run-20240530_070447-fi4sos5j/files/config.yaml +44 -0
  20. lm-evaluation-harness/wandb/run-20240530_070447-fi4sos5j/files/output.log +38 -0
  21. lm-evaluation-harness/wandb/run-20240530_070447-fi4sos5j/files/wandb-metadata.json +850 -0
  22. lm-evaluation-harness/wandb/run-20240530_070447-fi4sos5j/logs/debug-internal.log +196 -0
  23. lm-evaluation-harness/wandb/run-20240530_070447-fi4sos5j/logs/debug.log +29 -0
  24. lm-evaluation-harness/wandb/run-20240530_070447-fi4sos5j/run-fi4sos5j.wandb +0 -0
  25. venv/lib/python3.10/site-packages/transformers/models/gpt_bigcode/__init__.py +65 -0
  26. venv/lib/python3.10/site-packages/transformers/models/gpt_bigcode/__pycache__/__init__.cpython-310.pyc +0 -0
  27. venv/lib/python3.10/site-packages/transformers/models/gpt_bigcode/__pycache__/configuration_gpt_bigcode.cpython-310.pyc +0 -0
  28. venv/lib/python3.10/site-packages/transformers/models/gpt_bigcode/__pycache__/modeling_gpt_bigcode.cpython-310.pyc +0 -0
  29. venv/lib/python3.10/site-packages/transformers/models/gpt_bigcode/configuration_gpt_bigcode.py +144 -0
  30. venv/lib/python3.10/site-packages/transformers/models/gpt_bigcode/modeling_gpt_bigcode.py +1504 -0
  31. venv/lib/python3.10/site-packages/transformers/models/gptj/__init__.py +112 -0
  32. venv/lib/python3.10/site-packages/transformers/models/gptj/__pycache__/__init__.cpython-310.pyc +0 -0
  33. venv/lib/python3.10/site-packages/transformers/models/gptj/__pycache__/configuration_gptj.cpython-310.pyc +0 -0
  34. venv/lib/python3.10/site-packages/transformers/models/gptj/__pycache__/modeling_flax_gptj.cpython-310.pyc +0 -0
  35. venv/lib/python3.10/site-packages/transformers/models/gptj/__pycache__/modeling_gptj.cpython-310.pyc +0 -0
  36. venv/lib/python3.10/site-packages/transformers/models/gptj/__pycache__/modeling_tf_gptj.cpython-310.pyc +0 -0
  37. venv/lib/python3.10/site-packages/transformers/models/gptj/configuration_gptj.py +218 -0
  38. venv/lib/python3.10/site-packages/transformers/models/gptj/modeling_flax_gptj.py +718 -0
  39. venv/lib/python3.10/site-packages/transformers/models/gptj/modeling_gptj.py +1427 -0
  40. venv/lib/python3.10/site-packages/transformers/models/gptj/modeling_tf_gptj.py +1099 -0
  41. venv/lib/python3.10/site-packages/transformers/models/gptsan_japanese/__init__.py +70 -0
  42. venv/lib/python3.10/site-packages/transformers/models/gptsan_japanese/__pycache__/__init__.cpython-310.pyc +0 -0
  43. venv/lib/python3.10/site-packages/transformers/models/gptsan_japanese/__pycache__/configuration_gptsan_japanese.cpython-310.pyc +0 -0
  44. venv/lib/python3.10/site-packages/transformers/models/gptsan_japanese/__pycache__/convert_gptsan_tf_checkpoint_to_pytorch.cpython-310.pyc +0 -0
  45. venv/lib/python3.10/site-packages/transformers/models/gptsan_japanese/__pycache__/modeling_gptsan_japanese.cpython-310.pyc +0 -0
  46. venv/lib/python3.10/site-packages/transformers/models/gptsan_japanese/__pycache__/tokenization_gptsan_japanese.cpython-310.pyc +0 -0
  47. venv/lib/python3.10/site-packages/transformers/models/gptsan_japanese/configuration_gptsan_japanese.py +156 -0
  48. venv/lib/python3.10/site-packages/transformers/models/gptsan_japanese/convert_gptsan_tf_checkpoint_to_pytorch.py +181 -0
  49. venv/lib/python3.10/site-packages/transformers/models/gptsan_japanese/modeling_gptsan_japanese.py +1343 -0
  50. venv/lib/python3.10/site-packages/transformers/models/gptsan_japanese/tokenization_gptsan_japanese.py +526 -0
ckpts/universal/global_step20/zero/3.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:46198f0d490fa17143187016e15d158ca84efcafea46ccfe5d91dda8e0b26bc3
+ size 33555612
ckpts/universal/global_step20/zero/3.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ebfd91f176c4b8ce9b3795525983e33255e9a04480e63b4692d12b3abe0b7778
+ size 33555627
ckpts/universal/global_step20/zero/5.post_attention_layernorm.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3d6053a8dab8d5ed81b669e5569809ede3910dc2e615fcac85c2ca1957c4fc95
+ size 9372
ckpts/universal/global_step20/zero/5.post_attention_layernorm.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8082119865df759836867ddbfaf9152547cca6f03ddd4c61a268bd14aac036fc
+ size 9387
ckpts/universal/global_step20/zero/5.post_attention_layernorm.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7b67fa3a6d4f6079276d7d1aa7b8d931aea9838af0667abaf36483cc4eceafc8
+ size 9293
ckpts/universal/global_step20/zero/9.attention.query_key_value.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c7f9c4024e79471049640a1bc60425b091f525f1479a331ede254245f08d3b3d
+ size 50332828
lm-evaluation-harness/wandb/run-20240522_185944-8sj20j0r/files/requirements.txt ADDED
@@ -0,0 +1,155 @@
+ DataProperty==1.0.1
+ GitPython==3.1.43
+ Jinja2==3.1.4
+ Markdown==3.6
+ MarkupSafe==2.1.5
+ Pillow-SIMD==7.0.0.post3
+ PyYAML==6.0
+ Werkzeug==3.0.3
+ absl-py==2.1.0
+ accelerate==0.30.1
+ aiohttp==3.9.5
+ aiosignal==1.3.1
+ async-timeout==4.0.3
+ attrs==23.2.0
+ av==9.2.0
+ cachetools==5.3.3
+ certifi==2024.2.2
+ cffi==1.15.1
+ cfgv==3.4.0
+ chardet==5.2.0
+ charset-normalizer==3.3.2
+ click==8.1.7
+ cmake==3.29.2
+ colorama==0.4.6
+ datasets==2.19.1
+ deepspeed==0.12.4+hpu.synapse.v1.15.1
+ dill==0.3.8
+ distlib==0.3.8
+ docker-pycreds==0.4.0
+ einops==0.8.0
+ evaluate==0.4.2
+ exceptiongroup==1.2.1
+ expecttest==0.2.1
+ filelock==3.14.0
+ frozenlist==1.4.1
+ fsspec==2024.3.1
+ gitdb==4.0.11
+ google-auth-oauthlib==0.4.6
+ google-auth==2.29.0
+ grpcio==1.63.0
+ habana-media-loader==1.15.1.15
+ habana-pyhlml==1.15.1.15
+ habana-torch-dataloader==1.15.1.15
+ habana-torch-plugin==1.15.1.15
+ habana_gpu_migration==1.15.1.15
+ habana_quantization_toolkit==1.15.1.15
+ hjson==3.1.0
+ huggingface-hub==0.23.1
+ identify==2.5.36
+ idna==3.7
+ iniconfig==2.0.0
+ joblib==1.4.2
+ jsonlines==4.0.0
+ lightning-habana==1.4.0
+ lightning-utilities==0.11.2
+ lightning==2.2.0.post0
+ lm_eval==0.4.2
+ lm_eval==0.4.2
+ lm_eval==0.4.2
+ lxml==5.2.2
+ mbstrdecoder==1.1.3
+ more-itertools==10.2.0
+ mpi4py==3.1.4
+ mpmath==1.3.0
+ multidict==6.0.5
+ multiprocess==0.70.16
+ networkx==3.3
+ ninja==1.11.1.1
+ nltk==3.8.1
+ nodeenv==1.8.0
+ numexpr==2.10.0
+ numpy==1.23.5
+ oauthlib==3.2.2
+ packaging==24.0
+ pandas==2.0.1
+ pathspec==0.12.1
+ pathvalidate==3.2.0
+ peft==0.11.1
+ perfetto==0.7.0
+ pillow==10.3.0
+ pip==22.0.2
+ pip==23.3.1
+ platformdirs==4.2.1
+ pluggy==1.5.0
+ portalocker==2.8.2
+ pre-commit==3.3.3
+ pretty-errors==1.2.25
+ protobuf==3.20.3
+ psutil==5.9.8
+ py-cpuinfo==9.0.0
+ pyarrow-hotfix==0.6
+ pyarrow==16.1.0
+ pyasn1==0.6.0
+ pyasn1_modules==0.4.0
+ pybind11==2.10.4
+ pycparser==2.22
+ pydantic==1.10.13
+ pynvml==8.0.4
+ pytablewriter==1.2.0
+ pytest==8.2.0
+ python-dateutil==2.9.0.post0
+ pytorch-lightning==2.2.4
+ pytz==2024.1
+ regex==2023.5.5
+ requests-oauthlib==2.0.0
+ requests==2.31.0
+ rouge_score==0.1.2
+ rsa==4.9
+ sacrebleu==2.4.2
+ safetensors==0.4.3
+ scikit-learn==1.5.0
+ scipy==1.13.0
+ sentencepiece==0.2.0
+ sentry-sdk==2.2.1
+ setproctitle==1.3.3
+ setuptools==59.6.0
+ setuptools==69.5.1
+ six==1.16.0
+ smmap==5.0.1
+ sqlitedict==2.1.0
+ symengine==0.11.0
+ sympy==1.12
+ tabledata==1.3.3
+ tabulate==0.9.0
+ tcolorpy==0.1.6
+ tdqm==0.0.1
+ tensorboard-data-server==0.6.1
+ tensorboard-plugin-wit==1.8.1
+ tensorboard==2.11.2
+ threadpoolctl==3.5.0
+ tokenizers==0.19.1
+ tomli==2.0.1
+ torch==2.2.0a0+git8964477
+ torch_tb_profiler==0.4.0
+ torchaudio==2.2.0+08901ad
+ torchdata==0.7.1+5e6f7b7
+ torchmetrics==1.4.0
+ torchtext==0.17.0+400da5c
+ torchvision==0.17.0+b2383d4
+ tqdm-multiprocess==0.0.11
+ tqdm==4.66.4
+ transformers==4.41.0
+ typepy==1.3.2
+ typing_extensions==4.11.0
+ tzdata==2024.1
+ urllib3==1.26.18
+ virtualenv==20.26.1
+ wandb==0.17.0
+ wheel==0.37.1
+ wheel==0.43.0
+ word2number==1.1
+ xxhash==3.4.1
+ yamllint==1.35.1
+ yarl==1.9.4
+ zstandard==0.22.0
lm-evaluation-harness/wandb/run-20240522_185944-8sj20j0r/files/wandb-summary.json ADDED
@@ -0,0 +1 @@
+ {"_wandb": {"runtime": 11}}
lm-evaluation-harness/wandb/run-20240522_185944-8sj20j0r/logs/debug-internal.log ADDED
@@ -0,0 +1,182 @@
1
+ 2024-05-22 18:59:44,431 INFO StreamThr :4100 [internal.py:wandb_internal():85] W&B internal server running at pid: 4100, started at: 2024-05-22 18:59:44.429193
2
+ 2024-05-22 18:59:44,436 DEBUG HandlerThread:4100 [handler.py:handle_request():158] handle_request: status
3
+ 2024-05-22 18:59:44,437 INFO WriterThread:4100 [datastore.py:open_for_write():87] open: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_185944-8sj20j0r/run-8sj20j0r.wandb
4
+ 2024-05-22 18:59:44,439 DEBUG SenderThread:4100 [sender.py:send():378] send: header
5
+ 2024-05-22 18:59:44,442 DEBUG SenderThread:4100 [sender.py:send():378] send: run
6
+ 2024-05-22 18:59:44,700 INFO SenderThread:4100 [dir_watcher.py:__init__():211] watching files in: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_185944-8sj20j0r/files
7
+ 2024-05-22 18:59:44,701 INFO SenderThread:4100 [sender.py:_start_run_threads():1123] run started: 8sj20j0r with start time 1716404384.42905
8
+ 2024-05-22 18:59:44,705 DEBUG HandlerThread:4100 [handler.py:handle_request():158] handle_request: check_version
9
+ 2024-05-22 18:59:44,705 DEBUG SenderThread:4100 [sender.py:send_request():405] send_request: check_version
10
+ 2024-05-22 18:59:44,826 DEBUG HandlerThread:4100 [handler.py:handle_request():158] handle_request: run_start
11
+ 2024-05-22 18:59:44,828 DEBUG HandlerThread:4100 [system_info.py:__init__():26] System info init
12
+ 2024-05-22 18:59:44,829 DEBUG HandlerThread:4100 [system_info.py:__init__():41] System info init done
13
+ 2024-05-22 18:59:44,829 INFO HandlerThread:4100 [system_monitor.py:start():194] Starting system monitor
14
+ 2024-05-22 18:59:44,829 INFO SystemMonitor:4100 [system_monitor.py:_start():158] Starting system asset monitoring threads
15
+ 2024-05-22 18:59:44,829 INFO HandlerThread:4100 [system_monitor.py:probe():214] Collecting system info
16
+ 2024-05-22 18:59:44,836 INFO SystemMonitor:4100 [interfaces.py:start():188] Started cpu monitoring
17
+ 2024-05-22 18:59:44,836 INFO SystemMonitor:4100 [interfaces.py:start():188] Started disk monitoring
18
+ 2024-05-22 18:59:44,842 INFO SystemMonitor:4100 [interfaces.py:start():188] Started memory monitoring
19
+ 2024-05-22 18:59:44,842 INFO SystemMonitor:4100 [interfaces.py:start():188] Started network monitoring
20
+ 2024-05-22 18:59:44,922 DEBUG HandlerThread:4100 [system_info.py:probe():150] Probing system
21
+ 2024-05-22 18:59:44,926 DEBUG HandlerThread:4100 [system_info.py:_probe_git():135] Probing git
22
+ 2024-05-22 18:59:44,937 ERROR HandlerThread:4100 [gitlib.py:root():92] git root error: Cmd('git') failed due to: exit code(128)
23
+ cmdline: git rev-parse --show-toplevel
24
+ stderr: 'fatal: detected dubious ownership in repository at '/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness'
25
+ To add an exception for this directory, call:
26
+
27
+ git config --global --add safe.directory /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness'
28
+ 2024-05-22 18:59:44,937 DEBUG HandlerThread:4100 [system_info.py:_probe_git():143] Probing git done
29
+ 2024-05-22 18:59:44,937 DEBUG HandlerThread:4100 [system_info.py:probe():198] Probing system done
30
+ 2024-05-22 18:59:44,937 DEBUG HandlerThread:4100 [system_monitor.py:probe():223] {'os': 'Linux-5.15.0-92-generic-x86_64-with-glibc2.35', 'python': '3.10.12', 'heartbeatAt': '2024-05-22T18:59:44.922176', 'startedAt': '2024-05-22T18:59:44.408977', 'docker': None, 'cuda': None, 'args': ('--model', 'hf', '--model_args', 'pretrained=/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step4000', '--tasks', 'hellaswag,arc_easy,openbookqa,winogrande,sst2,mrpc', '--batch_size', 'auto', '--wandb_args', 'project=bharatgpt,group=trial_expt_2'), 'state': 'running', 'program': '-m lm_eval.__main__', 'codePathLocal': None, 'git': {'remote': 'https://github.com/EleutherAI/lm-evaluation-harness', 'commit': None}, 'email': None, 'root': '/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness', 'host': 'peacock-evaluation-worker-0', 'username': 'root', 'executable': '/usr/bin/python3', 'cpu_count': 80, 'cpu_count_logical': 160, 'cpu_freq': {'current': 2347.7606062500004, 'min': 800.0, 'max': 3400.0}, 'cpu_freq_per_core': [{'current': 3321.773, 'min': 800.0, 'max': 3400.0}, {'current': 3321.767, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3299.997, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, 
{'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.871, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, 
{'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}], 'disk': {'/': {'total': 877.6341285705566, 'used': 211.6415901184082}}, 'memory': {'total': 1007.4379997253418}}
31
+ 2024-05-22 18:59:44,937 INFO HandlerThread:4100 [system_monitor.py:probe():224] Finished collecting system info
32
+ 2024-05-22 18:59:44,937 INFO HandlerThread:4100 [system_monitor.py:probe():227] Publishing system info
33
+ 2024-05-22 18:59:44,942 INFO HandlerThread:4100 [system_monitor.py:probe():229] Finished publishing system info
34
+ 2024-05-22 18:59:44,948 DEBUG SenderThread:4100 [sender.py:send():378] send: files
35
+ 2024-05-22 18:59:44,948 INFO SenderThread:4100 [sender.py:_save_file():1389] saving file wandb-metadata.json with policy now
36
+ 2024-05-22 18:59:45,131 DEBUG HandlerThread:4100 [handler.py:handle_request():158] handle_request: python_packages
37
+ 2024-05-22 18:59:45,131 DEBUG SenderThread:4100 [sender.py:send_request():405] send_request: python_packages
38
+ 2024-05-22 18:59:45,132 DEBUG HandlerThread:4100 [handler.py:handle_request():158] handle_request: stop_status
39
+ 2024-05-22 18:59:45,133 DEBUG SenderThread:4100 [sender.py:send_request():405] send_request: stop_status
40
+ 2024-05-22 18:59:45,252 DEBUG SenderThread:4100 [sender.py:send():378] send: telemetry
41
+ 2024-05-22 18:59:45,539 INFO wandb-upload_0:4100 [upload_job.py:push():130] Uploaded file /tmp/tmp2xnq2_a6wandb/qzh7qybp-wandb-metadata.json
42
+ 2024-05-22 18:59:45,703 INFO Thread-12 :4100 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_185944-8sj20j0r/files/output.log
43
+ 2024-05-22 18:59:45,703 INFO Thread-12 :4100 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_185944-8sj20j0r/files/requirements.txt
44
+ 2024-05-22 18:59:45,703 INFO Thread-12 :4100 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_185944-8sj20j0r/files/wandb-metadata.json
45
+ 2024-05-22 18:59:47,703 INFO Thread-12 :4100 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_185944-8sj20j0r/files/output.log
46
+ 2024-05-22 18:59:50,258 DEBUG HandlerThread:4100 [handler.py:handle_request():158] handle_request: status_report
47
+ 2024-05-22 18:59:55,628 DEBUG HandlerThread:4100 [handler.py:handle_request():158] handle_request: status_report
48
+ 2024-05-22 18:59:55,711 INFO Thread-12 :4100 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_185944-8sj20j0r/files/output.log
49
+ 2024-05-22 18:59:55,920 DEBUG SenderThread:4100 [sender.py:send():378] send: exit
50
+ 2024-05-22 18:59:55,920 INFO SenderThread:4100 [sender.py:send_exit():585] handling exit code: 1
51
+ 2024-05-22 18:59:55,920 INFO SenderThread:4100 [sender.py:send_exit():587] handling runtime: 11
52
+ 2024-05-22 18:59:55,922 INFO SenderThread:4100 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end
53
+ 2024-05-22 18:59:55,922 INFO SenderThread:4100 [sender.py:send_exit():593] send defer
54
+ 2024-05-22 18:59:55,922 DEBUG HandlerThread:4100 [handler.py:handle_request():158] handle_request: defer
55
+ 2024-05-22 18:59:55,922 INFO HandlerThread:4100 [handler.py:handle_request_defer():184] handle defer: 0
56
+ 2024-05-22 18:59:55,922 DEBUG SenderThread:4100 [sender.py:send_request():405] send_request: defer
57
+ 2024-05-22 18:59:55,922 INFO SenderThread:4100 [sender.py:send_request_defer():609] handle sender defer: 0
58
+ 2024-05-22 18:59:55,923 INFO SenderThread:4100 [sender.py:transition_state():613] send defer: 1
59
+ 2024-05-22 18:59:55,923 DEBUG HandlerThread:4100 [handler.py:handle_request():158] handle_request: defer
60
+ 2024-05-22 18:59:55,923 INFO HandlerThread:4100 [handler.py:handle_request_defer():184] handle defer: 1
61
+ 2024-05-22 18:59:55,923 DEBUG SenderThread:4100 [sender.py:send_request():405] send_request: defer
62
+ 2024-05-22 18:59:55,923 INFO SenderThread:4100 [sender.py:send_request_defer():609] handle sender defer: 1
63
+ 2024-05-22 18:59:55,923 INFO SenderThread:4100 [sender.py:transition_state():613] send defer: 2
64
+ 2024-05-22 18:59:55,923 DEBUG HandlerThread:4100 [handler.py:handle_request():158] handle_request: defer
65
+ 2024-05-22 18:59:55,923 INFO HandlerThread:4100 [handler.py:handle_request_defer():184] handle defer: 2
66
+ 2024-05-22 18:59:55,923 INFO HandlerThread:4100 [system_monitor.py:finish():203] Stopping system monitor
67
+ 2024-05-22 18:59:55,923 DEBUG SystemMonitor:4100 [system_monitor.py:_start():172] Starting system metrics aggregation loop
68
+ 2024-05-22 18:59:55,923 DEBUG SystemMonitor:4100 [system_monitor.py:_start():179] Finished system metrics aggregation loop
69
+ 2024-05-22 18:59:55,923 DEBUG SystemMonitor:4100 [system_monitor.py:_start():183] Publishing last batch of metrics
70
+ 2024-05-22 18:59:55,924 INFO HandlerThread:4100 [interfaces.py:finish():200] Joined cpu monitor
71
+ 2024-05-22 18:59:55,924 INFO HandlerThread:4100 [interfaces.py:finish():200] Joined disk monitor
72
+ 2024-05-22 18:59:55,924 INFO HandlerThread:4100 [interfaces.py:finish():200] Joined memory monitor
73
+ 2024-05-22 18:59:55,924 INFO HandlerThread:4100 [interfaces.py:finish():200] Joined network monitor
74
+ 2024-05-22 18:59:55,924 DEBUG SenderThread:4100 [sender.py:send_request():405] send_request: defer
75
+ 2024-05-22 18:59:55,924 INFO SenderThread:4100 [sender.py:send_request_defer():609] handle sender defer: 2
76
+ 2024-05-22 18:59:55,925 INFO SenderThread:4100 [sender.py:transition_state():613] send defer: 3
77
+ 2024-05-22 18:59:55,925 DEBUG HandlerThread:4100 [handler.py:handle_request():158] handle_request: defer
78
+ 2024-05-22 18:59:55,925 INFO HandlerThread:4100 [handler.py:handle_request_defer():184] handle defer: 3
79
+ 2024-05-22 18:59:55,925 DEBUG SenderThread:4100 [sender.py:send_request():405] send_request: defer
80
+ 2024-05-22 18:59:55,925 INFO SenderThread:4100 [sender.py:send_request_defer():609] handle sender defer: 3
81
+ 2024-05-22 18:59:55,925 INFO SenderThread:4100 [sender.py:transition_state():613] send defer: 4
82
+ 2024-05-22 18:59:55,925 DEBUG HandlerThread:4100 [handler.py:handle_request():158] handle_request: defer
83
+ 2024-05-22 18:59:55,925 INFO HandlerThread:4100 [handler.py:handle_request_defer():184] handle defer: 4
84
+ 2024-05-22 18:59:55,925 DEBUG SenderThread:4100 [sender.py:send_request():405] send_request: defer
85
+ 2024-05-22 18:59:55,925 INFO SenderThread:4100 [sender.py:send_request_defer():609] handle sender defer: 4
86
+ 2024-05-22 18:59:55,925 INFO SenderThread:4100 [sender.py:transition_state():613] send defer: 5
87
+ 2024-05-22 18:59:55,925 DEBUG HandlerThread:4100 [handler.py:handle_request():158] handle_request: defer
88
+ 2024-05-22 18:59:55,925 INFO HandlerThread:4100 [handler.py:handle_request_defer():184] handle defer: 5
89
+ 2024-05-22 18:59:55,925 DEBUG SenderThread:4100 [sender.py:send():378] send: summary
90
+ 2024-05-22 18:59:55,929 INFO SenderThread:4100 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end
91
+ 2024-05-22 18:59:55,929 DEBUG SenderThread:4100 [sender.py:send_request():405] send_request: defer
92
+ 2024-05-22 18:59:55,929 INFO SenderThread:4100 [sender.py:send_request_defer():609] handle sender defer: 5
93
+ 2024-05-22 18:59:55,929 INFO SenderThread:4100 [sender.py:transition_state():613] send defer: 6
94
+ 2024-05-22 18:59:55,929 DEBUG HandlerThread:4100 [handler.py:handle_request():158] handle_request: defer
95
+ 2024-05-22 18:59:55,929 INFO HandlerThread:4100 [handler.py:handle_request_defer():184] handle defer: 6
96
+ 2024-05-22 18:59:55,929 DEBUG SenderThread:4100 [sender.py:send_request():405] send_request: defer
97
+ 2024-05-22 18:59:55,930 INFO SenderThread:4100 [sender.py:send_request_defer():609] handle sender defer: 6
98
+ 2024-05-22 18:59:55,934 DEBUG HandlerThread:4100 [handler.py:handle_request():158] handle_request: status_report
99
+ 2024-05-22 18:59:56,072 INFO SenderThread:4100 [sender.py:transition_state():613] send defer: 7
100
+ 2024-05-22 18:59:56,072 DEBUG HandlerThread:4100 [handler.py:handle_request():158] handle_request: defer
101
+ 2024-05-22 18:59:56,072 INFO HandlerThread:4100 [handler.py:handle_request_defer():184] handle defer: 7
102
+ 2024-05-22 18:59:56,072 DEBUG SenderThread:4100 [sender.py:send_request():405] send_request: defer
103
+ 2024-05-22 18:59:56,072 INFO SenderThread:4100 [sender.py:send_request_defer():609] handle sender defer: 7
104
+ 2024-05-22 18:59:56,712 INFO Thread-12 :4100 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_185944-8sj20j0r/files/config.yaml
105
+ 2024-05-22 18:59:56,712 INFO Thread-12 :4100 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_185944-8sj20j0r/files/wandb-summary.json
106
+ 2024-05-22 18:59:56,920 DEBUG HandlerThread:4100 [handler.py:handle_request():158] handle_request: poll_exit
107
+ 2024-05-22 18:59:57,280 INFO SenderThread:4100 [sender.py:transition_state():613] send defer: 8
108
+ 2024-05-22 18:59:57,281 DEBUG SenderThread:4100 [sender.py:send_request():405] send_request: poll_exit
109
+ 2024-05-22 18:59:57,281 DEBUG HandlerThread:4100 [handler.py:handle_request():158] handle_request: defer
110
+ 2024-05-22 18:59:57,281 INFO HandlerThread:4100 [handler.py:handle_request_defer():184] handle defer: 8
111
+ 2024-05-22 18:59:57,281 DEBUG SenderThread:4100 [sender.py:send_request():405] send_request: defer
112
+ 2024-05-22 18:59:57,281 INFO SenderThread:4100 [sender.py:send_request_defer():609] handle sender defer: 8
113
+ 2024-05-22 18:59:57,281 INFO SenderThread:4100 [job_builder.py:build():432] Attempting to build job artifact
114
+ 2024-05-22 18:59:57,282 INFO SenderThread:4100 [job_builder.py:_get_source_type():576] no source found
115
+ 2024-05-22 18:59:57,282 INFO SenderThread:4100 [sender.py:transition_state():613] send defer: 9
116
+ 2024-05-22 18:59:57,282 DEBUG HandlerThread:4100 [handler.py:handle_request():158] handle_request: defer
117
+ 2024-05-22 18:59:57,282 INFO HandlerThread:4100 [handler.py:handle_request_defer():184] handle defer: 9
118
+ 2024-05-22 18:59:57,282 DEBUG SenderThread:4100 [sender.py:send_request():405] send_request: defer
119
+ 2024-05-22 18:59:57,282 INFO SenderThread:4100 [sender.py:send_request_defer():609] handle sender defer: 9
120
+ 2024-05-22 18:59:57,282 INFO SenderThread:4100 [dir_watcher.py:finish():358] shutting down directory watcher
121
+ 2024-05-22 18:59:57,713 INFO SenderThread:4100 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_185944-8sj20j0r/files/output.log
122
+ 2024-05-22 18:59:57,714 INFO SenderThread:4100 [dir_watcher.py:finish():388] scan: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_185944-8sj20j0r/files
123
+ 2024-05-22 18:59:57,714 INFO SenderThread:4100 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_185944-8sj20j0r/files/config.yaml config.yaml
124
+ 2024-05-22 18:59:57,714 INFO SenderThread:4100 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_185944-8sj20j0r/files/output.log output.log
125
+ 2024-05-22 18:59:57,717 INFO SenderThread:4100 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_185944-8sj20j0r/files/requirements.txt requirements.txt
126
+ 2024-05-22 18:59:57,717 INFO SenderThread:4100 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_185944-8sj20j0r/files/wandb-metadata.json wandb-metadata.json
127
+ 2024-05-22 18:59:57,717 INFO SenderThread:4100 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_185944-8sj20j0r/files/wandb-summary.json wandb-summary.json
128
+ 2024-05-22 18:59:57,717 INFO SenderThread:4100 [sender.py:transition_state():613] send defer: 10
129
+ 2024-05-22 18:59:57,717 DEBUG HandlerThread:4100 [handler.py:handle_request():158] handle_request: defer
130
+ 2024-05-22 18:59:57,717 INFO HandlerThread:4100 [handler.py:handle_request_defer():184] handle defer: 10
131
+ 2024-05-22 18:59:57,719 DEBUG SenderThread:4100 [sender.py:send_request():405] send_request: defer
132
+ 2024-05-22 18:59:57,719 INFO SenderThread:4100 [sender.py:send_request_defer():609] handle sender defer: 10
133
+ 2024-05-22 18:59:57,719 INFO SenderThread:4100 [file_pusher.py:finish():169] shutting down file pusher
134
+ 2024-05-22 18:59:57,921 DEBUG HandlerThread:4100 [handler.py:handle_request():158] handle_request: poll_exit
135
+ 2024-05-22 18:59:57,921 DEBUG SenderThread:4100 [sender.py:send_request():405] send_request: poll_exit
136
+ 2024-05-22 18:59:57,955 INFO wandb-upload_0:4100 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_185944-8sj20j0r/files/config.yaml
137
+ 2024-05-22 18:59:58,393 INFO wandb-upload_1:4100 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_185944-8sj20j0r/files/output.log
138
+ 2024-05-22 18:59:58,444 INFO wandb-upload_3:4100 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_185944-8sj20j0r/files/wandb-summary.json
139
+ 2024-05-22 18:59:58,446 INFO wandb-upload_2:4100 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_185944-8sj20j0r/files/requirements.txt
140
+ 2024-05-22 18:59:58,646 INFO Thread-11 (_thread_body):4100 [sender.py:transition_state():613] send defer: 11
141
+ 2024-05-22 18:59:58,646 DEBUG HandlerThread:4100 [handler.py:handle_request():158] handle_request: defer
142
+ 2024-05-22 18:59:58,646 INFO HandlerThread:4100 [handler.py:handle_request_defer():184] handle defer: 11
143
+ 2024-05-22 18:59:58,646 DEBUG SenderThread:4100 [sender.py:send_request():405] send_request: defer
144
+ 2024-05-22 18:59:58,646 INFO SenderThread:4100 [sender.py:send_request_defer():609] handle sender defer: 11
145
+ 2024-05-22 18:59:58,646 INFO SenderThread:4100 [file_pusher.py:join():175] waiting for file pusher
146
+ 2024-05-22 18:59:58,647 INFO SenderThread:4100 [sender.py:transition_state():613] send defer: 12
147
+ 2024-05-22 18:59:58,647 DEBUG HandlerThread:4100 [handler.py:handle_request():158] handle_request: defer
148
+ 2024-05-22 18:59:58,647 INFO HandlerThread:4100 [handler.py:handle_request_defer():184] handle defer: 12
149
+ 2024-05-22 18:59:58,647 DEBUG SenderThread:4100 [sender.py:send_request():405] send_request: defer
150
+ 2024-05-22 18:59:58,647 INFO SenderThread:4100 [sender.py:send_request_defer():609] handle sender defer: 12
151
+ 2024-05-22 18:59:58,647 INFO SenderThread:4100 [file_stream.py:finish():601] file stream finish called
152
+ 2024-05-22 18:59:58,708 INFO SenderThread:4100 [file_stream.py:finish():605] file stream finish is done
153
+ 2024-05-22 18:59:58,709 INFO SenderThread:4100 [sender.py:transition_state():613] send defer: 13
154
+ 2024-05-22 18:59:58,709 DEBUG HandlerThread:4100 [handler.py:handle_request():158] handle_request: defer
155
+ 2024-05-22 18:59:58,709 INFO HandlerThread:4100 [handler.py:handle_request_defer():184] handle defer: 13
156
+ 2024-05-22 18:59:58,709 DEBUG SenderThread:4100 [sender.py:send_request():405] send_request: defer
157
+ 2024-05-22 18:59:58,709 INFO SenderThread:4100 [sender.py:send_request_defer():609] handle sender defer: 13
158
+ 2024-05-22 18:59:58,709 INFO SenderThread:4100 [sender.py:transition_state():613] send defer: 14
159
+ 2024-05-22 18:59:58,709 DEBUG HandlerThread:4100 [handler.py:handle_request():158] handle_request: defer
160
+ 2024-05-22 18:59:58,709 INFO HandlerThread:4100 [handler.py:handle_request_defer():184] handle defer: 14
161
+ 2024-05-22 18:59:58,709 DEBUG SenderThread:4100 [sender.py:send():378] send: final
162
+ 2024-05-22 18:59:58,709 DEBUG SenderThread:4100 [sender.py:send():378] send: footer
163
+ 2024-05-22 18:59:58,709 DEBUG SenderThread:4100 [sender.py:send_request():405] send_request: defer
164
+ 2024-05-22 18:59:58,709 INFO SenderThread:4100 [sender.py:send_request_defer():609] handle sender defer: 14
165
+ 2024-05-22 18:59:58,710 DEBUG HandlerThread:4100 [handler.py:handle_request():158] handle_request: poll_exit
166
+ 2024-05-22 18:59:58,710 DEBUG HandlerThread:4100 [handler.py:handle_request():158] handle_request: poll_exit
167
+ 2024-05-22 18:59:58,710 DEBUG HandlerThread:4100 [handler.py:handle_request():158] handle_request: server_info
168
+ 2024-05-22 18:59:58,710 DEBUG HandlerThread:4100 [handler.py:handle_request():158] handle_request: get_summary
169
+ 2024-05-22 18:59:58,710 DEBUG HandlerThread:4100 [handler.py:handle_request():158] handle_request: sampled_history
170
+ 2024-05-22 18:59:58,710 DEBUG HandlerThread:4100 [handler.py:handle_request():158] handle_request: internal_messages
171
+ 2024-05-22 18:59:58,711 DEBUG SenderThread:4100 [sender.py:send_request():405] send_request: poll_exit
172
+ 2024-05-22 18:59:58,711 DEBUG SenderThread:4100 [sender.py:send_request():405] send_request: poll_exit
173
+ 2024-05-22 18:59:58,711 DEBUG SenderThread:4100 [sender.py:send_request():405] send_request: server_info
174
+ 2024-05-22 18:59:58,774 INFO MainThread:4100 [wandb_run.py:_footer_history_summary_info():3994] rendering history
175
+ 2024-05-22 18:59:58,774 INFO MainThread:4100 [wandb_run.py:_footer_history_summary_info():4026] rendering summary
176
+ 2024-05-22 18:59:58,774 INFO MainThread:4100 [wandb_run.py:_footer_sync_info():3953] logging synced files
177
+ 2024-05-22 18:59:58,774 DEBUG HandlerThread:4100 [handler.py:handle_request():158] handle_request: shutdown
178
+ 2024-05-22 18:59:58,774 INFO HandlerThread:4100 [handler.py:finish():882] shutting down handler
179
+ 2024-05-22 18:59:59,711 INFO WriterThread:4100 [datastore.py:close():296] close: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_185944-8sj20j0r/run-8sj20j0r.wandb
180
+ 2024-05-22 18:59:59,774 INFO SenderThread:4100 [sender.py:finish():1545] shutting down sender
181
+ 2024-05-22 18:59:59,774 INFO SenderThread:4100 [file_pusher.py:finish():169] shutting down file pusher
182
+ 2024-05-22 18:59:59,774 INFO SenderThread:4100 [file_pusher.py:join():175] waiting for file pusher
lm-evaluation-harness/wandb/run-20240522_185944-8sj20j0r/logs/debug.log ADDED
@@ -0,0 +1,29 @@
+ 2024-05-22 18:59:44,423 INFO MainThread:3945 [wandb_setup.py:_flush():76] Current SDK version is 0.17.0
+ 2024-05-22 18:59:44,423 INFO MainThread:3945 [wandb_setup.py:_flush():76] Configure stats pid to 3945
+ 2024-05-22 18:59:44,423 INFO MainThread:3945 [wandb_setup.py:_flush():76] Loading settings from /root/.config/wandb/settings
+ 2024-05-22 18:59:44,423 INFO MainThread:3945 [wandb_setup.py:_flush():76] Loading settings from /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/settings
+ 2024-05-22 18:59:44,423 INFO MainThread:3945 [wandb_setup.py:_flush():76] Loading settings from environment variables: {}
+ 2024-05-22 18:59:44,423 INFO MainThread:3945 [wandb_setup.py:_flush():76] Applying setup settings: {'_disable_service': False}
+ 2024-05-22 18:59:44,423 WARNING MainThread:3945 [wandb_setup.py:_flush():76] Could not find program at -m lm_eval.__main__
+ 2024-05-22 18:59:44,423 INFO MainThread:3945 [wandb_setup.py:_flush():76] Inferring run settings from compute environment: {'program_relpath': None, 'program': '-m lm_eval.__main__'}
+ 2024-05-22 18:59:44,423 INFO MainThread:3945 [wandb_setup.py:_flush():76] Applying login settings: {}
+ 2024-05-22 18:59:44,423 INFO MainThread:3945 [wandb_init.py:_log_setup():520] Logging user logs to /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_185944-8sj20j0r/logs/debug.log
+ 2024-05-22 18:59:44,423 INFO MainThread:3945 [wandb_init.py:_log_setup():521] Logging internal logs to /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_185944-8sj20j0r/logs/debug-internal.log
+ 2024-05-22 18:59:44,423 INFO MainThread:3945 [wandb_init.py:init():560] calling init triggers
+ 2024-05-22 18:59:44,423 INFO MainThread:3945 [wandb_init.py:init():567] wandb.init called with sweep_config: {}
+ config: {}
+ 2024-05-22 18:59:44,423 INFO MainThread:3945 [wandb_init.py:init():610] starting backend
+ 2024-05-22 18:59:44,424 INFO MainThread:3945 [wandb_init.py:init():614] setting up manager
+ 2024-05-22 18:59:44,428 INFO MainThread:3945 [backend.py:_multiprocessing_setup():105] multiprocessing start_methods=fork,spawn,forkserver, using: spawn
+ 2024-05-22 18:59:44,428 INFO MainThread:3945 [wandb_init.py:init():622] backend started and connected
+ 2024-05-22 18:59:44,432 INFO MainThread:3945 [wandb_init.py:init():711] updated telemetry
+ 2024-05-22 18:59:44,442 INFO MainThread:3945 [wandb_init.py:init():744] communicating run to backend with 90.0 second timeout
+ 2024-05-22 18:59:44,704 INFO MainThread:3945 [wandb_run.py:_on_init():2396] communicating current version
+ 2024-05-22 18:59:44,820 INFO MainThread:3945 [wandb_run.py:_on_init():2405] got version response
+ 2024-05-22 18:59:44,820 INFO MainThread:3945 [wandb_init.py:init():795] starting run threads in backend
+ 2024-05-22 18:59:45,132 INFO MainThread:3945 [wandb_run.py:_console_start():2374] atexit reg
+ 2024-05-22 18:59:45,132 INFO MainThread:3945 [wandb_run.py:_redirect():2229] redirect: wrap_raw
+ 2024-05-22 18:59:45,132 INFO MainThread:3945 [wandb_run.py:_redirect():2294] Wrapping output streams.
+ 2024-05-22 18:59:45,132 INFO MainThread:3945 [wandb_run.py:_redirect():2319] Redirects installed.
+ 2024-05-22 18:59:45,134 INFO MainThread:3945 [wandb_init.py:init():838] run started, returning control to user process
+ 2024-05-22 18:59:59,775 WARNING MsgRouterThr:3945 [router.py:message_loop():77] message_loop has been closed
lm-evaluation-harness/wandb/run-20240523_125300-yqqf3gci/files/config.yaml ADDED
@@ -0,0 +1,43 @@
+ wandb_version: 1
+
+ _wandb:
+ desc: null
+ value:
+ python_version: 3.10.12
+ cli_version: 0.17.0
+ framework: huggingface
+ huggingface_version: 4.41.1
+ is_jupyter_run: false
+ is_kaggle_kernel: false
+ start_time: 1716468780
+ t:
+ 1:
+ - 1
+ - 5
+ - 11
+ - 49
+ - 51
+ - 53
+ - 55
+ - 71
+ - 98
+ - 100
+ 2:
+ - 1
+ - 5
+ - 11
+ - 49
+ - 51
+ - 53
+ - 55
+ - 71
+ - 98
+ - 100
+ 3:
+ - 23
+ 4: 3.10.12
+ 5: 0.17.0
+ 6: 4.41.1
+ 8:
+ - 5
+ 13: linux-x86_64
lm-evaluation-harness/wandb/run-20240523_125300-yqqf3gci/files/output.log ADDED
@@ -0,0 +1,34 @@
+
+ 2024-05-23:12:53:00,829 INFO [__main__.py:251] Verbosity set to INFO
+ 2024-05-23:12:53:10,418 INFO [__main__.py:335] Selected Tasks: ['arc_easy', 'hellaswag', 'mrpc', 'openbookqa', 'sst2', 'winogrande']
+ 2024-05-23:12:53:10,422 INFO [evaluator.py:131] Setting random seed to 0 | Setting numpy seed to 1234 | Setting torch manual seed to 1234
+ 2024-05-23:12:53:10,423 INFO [evaluator.py:177] Initializing hf model, with arguments: {'pretrained': '/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step100'}
+ 2024-05-23:12:53:12,724 INFO [huggingface.py:164] Using device 'cuda'
+ Traceback (most recent call last):
+ File "/usr/lib/python3.10/runpy.py", line 196, in _run_module_as_main
+ return _run_code(code, main_globals, None,
+ File "/usr/lib/python3.10/runpy.py", line 86, in _run_code
+ exec(code, run_globals)
+ File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 417, in <module>
+ cli_evaluate()
+ File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 341, in cli_evaluate
+ results = evaluator.simple_evaluate(
+ File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/utils.py", line 288, in _wrapper
+ return fn(*args, **kwargs)
+ File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/evaluator.py", line 180, in simple_evaluate
+ lm = lm_eval.api.registry.get_model(model).create_from_arg_string(
+ File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/api/model.py", line 134, in create_from_arg_string
+ return cls(**args, **args2)
+ File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 190, in __init__
+ self._get_config(
+ File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 471, in _get_config
+ self._config = transformers.AutoConfig.from_pretrained(
+ File "/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py", line 934, in from_pretrained
+ config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs)
+ File "/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 632, in get_config_dict
+ config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs)
+ File "/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 689, in _get_config_dict
+ resolved_config_file = cached_file(
+ File "/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py", line 370, in cached_file
+ raise EnvironmentError(
+ OSError: /mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step100 does not appear to have a file named config.json. Checkout 'https://huggingface.co//mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step100/tree/main' for available files.
lm-evaluation-harness/wandb/run-20240523_125300-yqqf3gci/files/requirements.txt ADDED
@@ -0,0 +1,155 @@
+ DataProperty==1.0.1
+ GitPython==3.1.43
+ Jinja2==3.1.4
+ Markdown==3.6
+ MarkupSafe==2.1.5
+ Pillow-SIMD==7.0.0.post3
+ PyYAML==6.0
+ Werkzeug==3.0.3
+ absl-py==2.1.0
+ accelerate==0.30.1
+ aiohttp==3.9.5
+ aiosignal==1.3.1
+ async-timeout==4.0.3
+ attrs==23.2.0
+ av==9.2.0
+ cachetools==5.3.3
+ certifi==2024.2.2
+ cffi==1.15.1
+ cfgv==3.4.0
+ chardet==5.2.0
+ charset-normalizer==3.3.2
+ click==8.1.7
+ cmake==3.29.2
+ colorama==0.4.6
+ datasets==2.19.1
+ deepspeed==0.12.4+hpu.synapse.v1.15.1
+ dill==0.3.8
+ distlib==0.3.8
+ docker-pycreds==0.4.0
+ einops==0.8.0
+ evaluate==0.4.2
+ exceptiongroup==1.2.1
+ expecttest==0.2.1
+ filelock==3.14.0
+ frozenlist==1.4.1
+ fsspec==2024.3.1
+ gitdb==4.0.11
+ google-auth-oauthlib==0.4.6
+ google-auth==2.29.0
+ grpcio==1.63.0
+ habana-media-loader==1.15.1.15
+ habana-pyhlml==1.15.1.15
+ habana-torch-dataloader==1.15.1.15
+ habana-torch-plugin==1.15.1.15
+ habana_gpu_migration==1.15.1.15
+ habana_quantization_toolkit==1.15.1.15
+ hjson==3.1.0
+ huggingface-hub==0.23.1
+ identify==2.5.36
+ idna==3.7
+ iniconfig==2.0.0
+ joblib==1.4.2
+ jsonlines==4.0.0
+ lightning-habana==1.4.0
+ lightning-utilities==0.11.2
+ lightning==2.2.0.post0
+ lm_eval==0.4.2
+ lm_eval==0.4.2
+ lm_eval==0.4.2
+ lxml==5.2.2
+ mbstrdecoder==1.1.3
+ more-itertools==10.2.0
+ mpi4py==3.1.4
+ mpmath==1.3.0
+ multidict==6.0.5
+ multiprocess==0.70.16
+ networkx==3.3
+ ninja==1.11.1.1
+ nltk==3.8.1
+ nodeenv==1.8.0
+ numexpr==2.10.0
+ numpy==1.23.5
+ oauthlib==3.2.2
+ packaging==24.0
+ pandas==2.0.1
+ pathspec==0.12.1
+ pathvalidate==3.2.0
+ peft==0.11.1
+ perfetto==0.7.0
+ pillow==10.3.0
+ pip==22.0.2
+ pip==23.3.1
+ platformdirs==4.2.1
+ pluggy==1.5.0
+ portalocker==2.8.2
+ pre-commit==3.3.3
+ pretty-errors==1.2.25
+ protobuf==3.20.3
+ psutil==5.9.8
+ py-cpuinfo==9.0.0
+ pyarrow-hotfix==0.6
+ pyarrow==16.1.0
+ pyasn1==0.6.0
+ pyasn1_modules==0.4.0
+ pybind11==2.10.4
+ pycparser==2.22
+ pydantic==1.10.13
+ pynvml==8.0.4
+ pytablewriter==1.2.0
+ pytest==8.2.0
+ python-dateutil==2.9.0.post0
+ pytorch-lightning==2.2.4
+ pytz==2024.1
+ regex==2023.5.5
+ requests-oauthlib==2.0.0
+ requests==2.31.0
+ rouge_score==0.1.2
+ rsa==4.9
+ sacrebleu==2.4.2
+ safetensors==0.4.3
+ scikit-learn==1.5.0
+ scipy==1.13.1
+ sentencepiece==0.2.0
+ sentry-sdk==2.3.0
+ setproctitle==1.3.3
+ setuptools==59.6.0
+ setuptools==69.5.1
+ six==1.16.0
+ smmap==5.0.1
+ sqlitedict==2.1.0
+ symengine==0.11.0
+ sympy==1.12
+ tabledata==1.3.3
+ tabulate==0.9.0
+ tcolorpy==0.1.6
+ tdqm==0.0.1
+ tensorboard-data-server==0.6.1
+ tensorboard-plugin-wit==1.8.1
+ tensorboard==2.11.2
+ threadpoolctl==3.5.0
+ tokenizers==0.19.1
+ tomli==2.0.1
+ torch==2.2.0a0+git8964477
+ torch_tb_profiler==0.4.0
+ torchaudio==2.2.0+08901ad
+ torchdata==0.7.1+5e6f7b7
+ torchmetrics==1.4.0
+ torchtext==0.17.0+400da5c
+ torchvision==0.17.0+b2383d4
+ tqdm-multiprocess==0.0.11
+ tqdm==4.66.4
+ transformers==4.41.1
+ typepy==1.3.2
+ typing_extensions==4.11.0
+ tzdata==2024.1
+ urllib3==1.26.18
+ virtualenv==20.26.1
+ wandb==0.17.0
+ wheel==0.37.1
+ wheel==0.43.0
+ word2number==1.1
+ xxhash==3.4.1
+ yamllint==1.35.1
+ yarl==1.9.4
+ zstandard==0.22.0
lm-evaluation-harness/wandb/run-20240523_125300-yqqf3gci/files/wandb-metadata.json ADDED
@@ -0,0 +1,850 @@
1
+ {
2
+ "os": "Linux-5.15.0-92-generic-x86_64-with-glibc2.35",
3
+ "python": "3.10.12",
4
+ "heartbeatAt": "2024-05-23T12:53:00.613604",
5
+ "startedAt": "2024-05-23T12:53:00.074192",
6
+ "docker": null,
7
+ "cuda": null,
8
+ "args": [
9
+ "--model",
10
+ "hf",
11
+ "--model_args",
12
+ "pretrained=/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step100",
13
+ "--tasks",
14
+ "hellaswag,arc_easy,openbookqa,winogrande,sst2,mrpc",
15
+ "--batch_size",
16
+ "auto",
17
+ "--wandb_args",
18
+ "project=bharatgpt,group=trial_expt_2"
19
+ ],
20
+ "state": "running",
21
+ "program": "-m lm_eval.__main__",
22
+ "codePathLocal": null,
23
+ "git": {
24
+ "remote": "https://github.com/EleutherAI/lm-evaluation-harness",
25
+ "commit": null
26
+ },
27
+ "email": null,
28
+ "root": "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness",
29
+ "host": "peacock-evaluation-worker-0",
30
+ "username": "root",
31
+ "executable": "/usr/bin/python3",
32
+ "cpu_count": 80,
33
+ "cpu_count_logical": 160,
34
+ "cpu_freq": {
35
+ "current": 2327.20563125,
36
+ "min": 800.0,
37
+ "max": 3400.0
38
+ },
39
+ "cpu_freq_per_core": [
40
+ {
41
+ "current": 3399.997,
42
+ "min": 800.0,
43
+ "max": 3400.0
44
+ },
45
+ {
46
+ "current": 3400.0,
47
+ "min": 800.0,
48
+ "max": 3400.0
49
+ },
50
+ {
51
+ "current": 2300.0,
52
+ "min": 800.0,
53
+ "max": 3400.0
54
+ },
55
+ {
56
+ "current": 2300.0,
57
+ "min": 800.0,
58
+ "max": 3400.0
59
+ },
60
+ {
61
+ "current": 2300.0,
62
+ "min": 800.0,
63
+ "max": 3400.0
64
+ },
65
+ {
66
+ "current": 2300.0,
67
+ "min": 800.0,
68
+ "max": 3400.0
69
+ },
70
+ {
71
+ "current": 2300.0,
72
+ "min": 800.0,
73
+ "max": 3400.0
74
+ },
75
+ {
76
+ "current": 2300.0,
77
+ "min": 800.0,
78
+ "max": 3400.0
79
+ },
80
+ {
81
+ "current": 2300.0,
82
+ "min": 800.0,
83
+ "max": 3400.0
84
+ },
85
+ {
86
+ "current": 2300.0,
87
+ "min": 800.0,
88
+ "max": 3400.0
89
+ },
90
+ {
91
+ "current": 2300.0,
92
+ "min": 800.0,
93
+ "max": 3400.0
94
+ },
95
+ {
96
+ "current": 2300.0,
97
+ "min": 800.0,
98
+ "max": 3400.0
99
+ },
100
+ {
101
+ "current": 2300.0,
102
+ "min": 800.0,
103
+ "max": 3400.0
104
+ },
105
+ {
106
+ "current": 2300.0,
107
+ "min": 800.0,
108
+ "max": 3400.0
109
+ },
110
+ {
111
+ "current": 2300.0,
112
+ "min": 800.0,
113
+ "max": 3400.0
114
+ },
115
+ {
116
+ "current": 2300.0,
117
+ "min": 800.0,
118
+ "max": 3400.0
119
+ },
120
+ {
121
+ "current": 2300.0,
122
+ "min": 800.0,
123
+ "max": 3400.0
124
+ },
125
+ {
126
+ "current": 2300.0,
127
+ "min": 800.0,
128
+ "max": 3400.0
129
+ },
130
+ {
131
+ "current": 2300.0,
132
+ "min": 800.0,
133
+ "max": 3400.0
134
+ },
135
+ {
136
+ "current": 2300.0,
137
+ "min": 800.0,
138
+ "max": 3400.0
139
+ },
140
+ {
141
+ "current": 2300.0,
142
+ "min": 800.0,
143
+ "max": 3400.0
144
+ },
145
+ {
146
+ "current": 2300.0,
147
+ "min": 800.0,
148
+ "max": 3400.0
149
+ },
150
+ {
151
+ "current": 2300.0,
152
+ "min": 800.0,
153
+ "max": 3400.0
154
+ },
155
+ {
156
+ "current": 2300.0,
157
+ "min": 800.0,
158
+ "max": 3400.0
159
+ },
160
+ {
161
+ "current": 2300.0,
162
+ "min": 800.0,
163
+ "max": 3400.0
164
+ },
165
+ {
166
+ "current": 2300.0,
167
+ "min": 800.0,
168
+ "max": 3400.0
169
+ },
170
+ {
171
+ "current": 2300.0,
172
+ "min": 800.0,
173
+ "max": 3400.0
174
+ },
175
+ {
176
+ "current": 2300.0,
177
+ "min": 800.0,
178
+ "max": 3400.0
179
+ },
180
+ {
181
+ "current": 2300.0,
182
+ "min": 800.0,
183
+ "max": 3400.0
184
+ },
185
+ {
186
+ "current": 2300.0,
187
+ "min": 800.0,
188
+ "max": 3400.0
189
+ },
190
+ {
191
+ "current": 2300.0,
192
+ "min": 800.0,
193
+ "max": 3400.0
194
+ },
195
+ {
196
+ "current": 2300.0,
197
+ "min": 800.0,
198
+ "max": 3400.0
199
+ },
200
+ {
201
+ "current": 2300.0,
202
+ "min": 800.0,
203
+ "max": 3400.0
204
+ },
205
+ {
206
+ "current": 2300.0,
207
+ "min": 800.0,
208
+ "max": 3400.0
209
+ },
210
+ {
211
+ "current": 2300.0,
212
+ "min": 800.0,
213
+ "max": 3400.0
214
+ },
215
+ {
216
+ "current": 2300.0,
217
+ "min": 800.0,
218
+ "max": 3400.0
219
+ },
220
+ {
221
+ "current": 2300.0,
222
+ "min": 800.0,
223
+ "max": 3400.0
224
+ },
225
+ {
226
+ "current": 2300.0,
227
+ "min": 800.0,
228
+ "max": 3400.0
229
+ },
230
+ {
231
+ "current": 2300.0,
232
+ "min": 800.0,
233
+ "max": 3400.0
234
+ },
235
+ {
236
+ "current": 2300.0,
237
+ "min": 800.0,
238
+ "max": 3400.0
239
+ },
240
+ {
241
+ "current": 3400.0,
242
+ "min": 800.0,
243
+ "max": 3400.0
244
+ },
245
+ {
246
+ "current": 2300.0,
247
+ "min": 800.0,
248
+ "max": 3400.0
249
+ },
250
+ {
251
+ "current": 2300.0,
252
+ "min": 800.0,
253
+ "max": 3400.0
254
+ },
255
+ {
256
+ "current": 2300.0,
257
+ "min": 800.0,
258
+ "max": 3400.0
259
+ },
260
+ {
261
+ "current": 2300.0,
262
+ "min": 800.0,
263
+ "max": 3400.0
264
+ },
265
+ {
266
+ "current": 2300.0,
267
+ "min": 800.0,
268
+ "max": 3400.0
269
+ },
270
+ {
271
+ "current": 2300.0,
272
+ "min": 800.0,
273
+ "max": 3400.0
274
+ },
275
+ {
276
+ "current": 2300.0,
277
+ "min": 800.0,
278
+ "max": 3400.0
279
+ },
280
+ {
281
+ "current": 2300.0,
282
+ "min": 800.0,
283
+ "max": 3400.0
284
+ },
285
+ {
286
+ "current": 2300.0,
287
+ "min": 800.0,
288
+ "max": 3400.0
289
+ },
290
+ {
291
+ "current": 2300.0,
292
+ "min": 800.0,
293
+ "max": 3400.0
294
+ },
295
+ {
296
+ "current": 2300.0,
297
+ "min": 800.0,
298
+ "max": 3400.0
299
+ },
300
+ {
301
+ "current": 2300.0,
302
+ "min": 800.0,
303
+ "max": 3400.0
304
+ },
305
+ {
306
+ "current": 2300.0,
307
+ "min": 800.0,
308
+ "max": 3400.0
309
+ },
310
+ {
311
+ "current": 2300.0,
312
+ "min": 800.0,
313
+ "max": 3400.0
314
+ },
315
+ {
316
+ "current": 2300.0,
317
+ "min": 800.0,
318
+ "max": 3400.0
319
+ },
320
+ {
321
+ "current": 2300.0,
322
+ "min": 800.0,
323
+ "max": 3400.0
324
+ },
325
+ {
326
+ "current": 2300.0,
327
+ "min": 800.0,
328
+ "max": 3400.0
329
+ },
330
+ {
331
+ "current": 2300.0,
332
+ "min": 800.0,
333
+ "max": 3400.0
334
+ },
335
+ {
336
+ "current": 2300.0,
337
+ "min": 800.0,
338
+ "max": 3400.0
339
+ },
340
+ {
341
+ "current": 2300.0,
342
+ "min": 800.0,
343
+ "max": 3400.0
344
+ },
345
+ {
346
+ "current": 2300.0,
347
+ "min": 800.0,
348
+ "max": 3400.0
349
+ },
350
+ {
351
+ "current": 2300.0,
352
+ "min": 800.0,
353
+ "max": 3400.0
354
+ },
355
+ {
356
+ "current": 2300.0,
357
+ "min": 800.0,
358
+ "max": 3400.0
359
+ },
360
+ {
361
+ "current": 2300.0,
362
+ "min": 800.0,
363
+ "max": 3400.0
364
+ },
365
+ {
366
+ "current": 2300.0,
367
+ "min": 800.0,
368
+ "max": 3400.0
369
+ },
370
+ {
371
+ "current": 2300.0,
372
+ "min": 800.0,
373
+ "max": 3400.0
374
+ },
375
+ {
376
+ "current": 2300.0,
377
+ "min": 800.0,
378
+ "max": 3400.0
379
+ },
380
+ {
381
+ "current": 2300.0,
382
+ "min": 800.0,
383
+ "max": 3400.0
384
+ },
385
+ {
386
+ "current": 2300.0,
387
+ "min": 800.0,
388
+ "max": 3400.0
389
+ },
390
+ {
391
+ "current": 2300.0,
392
+ "min": 800.0,
393
+ "max": 3400.0
394
+ },
395
+ {
396
+ "current": 2300.0,
397
+ "min": 800.0,
398
+ "max": 3400.0
399
+ },
400
+ {
401
+ "current": 2300.0,
402
+ "min": 800.0,
403
+ "max": 3400.0
404
+ },
405
+ {
406
+ "current": 2300.0,
407
+ "min": 800.0,
408
+ "max": 3400.0
409
+ },
410
+ {
411
+ "current": 2300.0,
412
+ "min": 800.0,
413
+ "max": 3400.0
414
+ },
415
+ {
416
+ "current": 2300.0,
417
+ "min": 800.0,
418
+ "max": 3400.0
419
+ },
420
+ {
421
+ "current": 2300.0,
422
+ "min": 800.0,
423
+ "max": 3400.0
424
+ },
425
+ {
426
+ "current": 2300.0,
427
+ "min": 800.0,
428
+ "max": 3400.0
429
+ },
430
+ {
431
+ "current": 2300.0,
432
+ "min": 800.0,
433
+ "max": 3400.0
434
+ },
435
+ {
436
+ "current": 2300.0,
437
+ "min": 800.0,
438
+ "max": 3400.0
439
+ },
440
+ {
441
+ "current": 3400.0,
442
+ "min": 800.0,
443
+ "max": 3400.0
444
+ },
445
+ {
446
+ "current": 2300.0,
447
+ "min": 800.0,
448
+ "max": 3400.0
449
+ },
450
+ {
451
+ "current": 2300.0,
452
+ "min": 800.0,
453
+ "max": 3400.0
454
+ },
455
+ {
456
+ "current": 2300.0,
457
+ "min": 800.0,
458
+ "max": 3400.0
459
+ },
460
+ {
461
+ "current": 2300.0,
462
+ "min": 800.0,
463
+ "max": 3400.0
464
+ },
465
+ {
466
+ "current": 2300.0,
467
+ "min": 800.0,
468
+ "max": 3400.0
469
+ },
470
+ {
471
+ "current": 2300.0,
472
+ "min": 800.0,
473
+ "max": 3400.0
474
+ },
475
+ {
476
+ "current": 2300.0,
477
+ "min": 800.0,
478
+ "max": 3400.0
479
+ },
480
+ {
481
+ "current": 2300.0,
482
+ "min": 800.0,
483
+ "max": 3400.0
484
+ },
485
+ {
486
+ "current": 2300.0,
487
+ "min": 800.0,
488
+ "max": 3400.0
489
+ },
490
+ {
491
+ "current": 2300.0,
492
+ "min": 800.0,
493
+ "max": 3400.0
494
+ },
495
+ {
496
+ "current": 2300.0,
497
+ "min": 800.0,
498
+ "max": 3400.0
499
+ },
500
+ {
501
+ "current": 2300.0,
502
+ "min": 800.0,
503
+ "max": 3400.0
504
+ },
505
+ {
506
+ "current": 2300.0,
507
+ "min": 800.0,
508
+ "max": 3400.0
509
+ },
510
+ {
511
+ "current": 2300.0,
512
+ "min": 800.0,
513
+ "max": 3400.0
514
+ },
515
+ {
516
+ "current": 2300.0,
517
+ "min": 800.0,
518
+ "max": 3400.0
519
+ },
520
+ {
521
+ "current": 2300.0,
522
+ "min": 800.0,
523
+ "max": 3400.0
524
+ },
525
+ {
526
+ "current": 2300.0,
527
+ "min": 800.0,
528
+ "max": 3400.0
529
+ },
530
+ {
531
+ "current": 2300.0,
532
+ "min": 800.0,
533
+ "max": 3400.0
534
+ },
535
+ {
536
+ "current": 2300.0,
537
+ "min": 800.0,
538
+ "max": 3400.0
539
+ },
540
+ {
541
+ "current": 2300.0,
542
+ "min": 800.0,
543
+ "max": 3400.0
544
+ },
545
+ {
546
+ "current": 2300.0,
547
+ "min": 800.0,
548
+ "max": 3400.0
549
+ },
550
+ {
551
+ "current": 2300.0,
552
+ "min": 800.0,
553
+ "max": 3400.0
554
+ },
555
+ {
556
+ "current": 2300.0,
557
+ "min": 800.0,
558
+ "max": 3400.0
559
+ },
560
+ {
561
+ "current": 2300.0,
562
+ "min": 800.0,
563
+ "max": 3400.0
564
+ },
565
+ {
566
+ "current": 2300.0,
567
+ "min": 800.0,
568
+ "max": 3400.0
569
+ },
570
+ {
571
+ "current": 2300.0,
572
+ "min": 800.0,
573
+ "max": 3400.0
574
+ },
575
+ {
576
+ "current": 2300.0,
577
+ "min": 800.0,
578
+ "max": 3400.0
579
+ },
580
+ {
581
+ "current": 2300.0,
582
+ "min": 800.0,
583
+ "max": 3400.0
584
+ },
585
+ {
586
+ "current": 2300.0,
587
+ "min": 800.0,
588
+ "max": 3400.0
589
+ },
590
+ {
591
+ "current": 2300.0,
592
+ "min": 800.0,
593
+ "max": 3400.0
594
+ },
595
+ {
596
+ "current": 2300.0,
597
+ "min": 800.0,
598
+ "max": 3400.0
599
+ },
600
+ {
601
+ "current": 2300.0,
602
+ "min": 800.0,
603
+ "max": 3400.0
604
+ },
605
+ {
606
+ "current": 2300.0,
607
+ "min": 800.0,
608
+ "max": 3400.0
609
+ },
610
+ {
611
+ "current": 2300.0,
612
+ "min": 800.0,
613
+ "max": 3400.0
614
+ },
615
+ {
616
+ "current": 2300.0,
617
+ "min": 800.0,
618
+ "max": 3400.0
619
+ },
620
+ {
621
+ "current": 2300.0,
622
+ "min": 800.0,
623
+ "max": 3400.0
624
+ },
625
+ {
626
+ "current": 2300.0,
627
+ "min": 800.0,
628
+ "max": 3400.0
629
+ },
630
+ {
631
+ "current": 2300.0,
632
+ "min": 800.0,
633
+ "max": 3400.0
634
+ },
635
+ {
636
+ "current": 2300.0,
637
+ "min": 800.0,
638
+ "max": 3400.0
639
+ },
640
+ {
641
+ "current": 2300.0,
642
+ "min": 800.0,
643
+ "max": 3400.0
644
+ },
645
+ {
646
+ "current": 2300.0,
647
+ "min": 800.0,
648
+ "max": 3400.0
649
+ },
650
+ {
651
+ "current": 2300.0,
652
+ "min": 800.0,
653
+ "max": 3400.0
654
+ },
655
+ {
656
+ "current": 2300.0,
657
+ "min": 800.0,
658
+ "max": 3400.0
659
+ },
660
+ {
661
+ "current": 2300.0,
662
+ "min": 800.0,
663
+ "max": 3400.0
664
+ },
665
+ {
666
+ "current": 2300.0,
667
+ "min": 800.0,
668
+ "max": 3400.0
669
+ },
670
+ {
671
+ "current": 2300.0,
672
+ "min": 800.0,
673
+ "max": 3400.0
674
+ },
675
+ {
676
+ "current": 2300.0,
677
+ "min": 800.0,
678
+ "max": 3400.0
679
+ },
680
+ {
681
+ "current": 2300.0,
682
+ "min": 800.0,
683
+ "max": 3400.0
684
+ },
685
+ {
686
+ "current": 2300.0,
687
+ "min": 800.0,
688
+ "max": 3400.0
689
+ },
690
+ {
691
+ "current": 2300.0,
692
+ "min": 800.0,
693
+ "max": 3400.0
694
+ },
695
+ {
696
+ "current": 2300.0,
697
+ "min": 800.0,
698
+ "max": 3400.0
699
+ },
700
+ {
701
+ "current": 2300.0,
702
+ "min": 800.0,
703
+ "max": 3400.0
704
+ },
705
+ {
706
+ "current": 2300.0,
707
+ "min": 800.0,
708
+ "max": 3400.0
709
+ },
710
+ {
711
+ "current": 2300.0,
712
+ "min": 800.0,
713
+ "max": 3400.0
714
+ },
715
+ {
716
+ "current": 2300.0,
717
+ "min": 800.0,
718
+ "max": 3400.0
719
+ },
720
+ {
721
+ "current": 2300.0,
722
+ "min": 800.0,
723
+ "max": 3400.0
724
+ },
725
+ {
726
+ "current": 2300.0,
727
+ "min": 800.0,
728
+ "max": 3400.0
729
+ },
730
+ {
731
+ "current": 2300.0,
732
+ "min": 800.0,
733
+ "max": 3400.0
734
+ },
735
+ {
736
+ "current": 2300.0,
737
+ "min": 800.0,
738
+ "max": 3400.0
739
+ },
740
+ {
741
+ "current": 2300.0,
742
+ "min": 800.0,
743
+ "max": 3400.0
744
+ },
745
+ {
746
+ "current": 2300.0,
747
+ "min": 800.0,
748
+ "max": 3400.0
749
+ },
750
+ {
751
+ "current": 2300.0,
752
+ "min": 800.0,
753
+ "max": 3400.0
754
+ },
755
+ {
756
+ "current": 2300.0,
757
+ "min": 800.0,
758
+ "max": 3400.0
759
+ },
760
+ {
761
+ "current": 2300.0,
762
+ "min": 800.0,
763
+ "max": 3400.0
764
+ },
765
+ {
766
+ "current": 2300.0,
767
+ "min": 800.0,
768
+ "max": 3400.0
769
+ },
770
+ {
771
+ "current": 2300.0,
772
+ "min": 800.0,
773
+ "max": 3400.0
774
+ },
775
+ {
776
+ "current": 2300.0,
777
+ "min": 800.0,
778
+ "max": 3400.0
779
+ },
780
+ {
781
+ "current": 2300.0,
782
+ "min": 800.0,
783
+ "max": 3400.0
784
+ },
785
+ {
786
+ "current": 2300.0,
787
+ "min": 800.0,
788
+ "max": 3400.0
789
+ },
790
+ {
791
+ "current": 2300.0,
792
+ "min": 800.0,
793
+ "max": 3400.0
794
+ },
795
+ {
796
+ "current": 2300.0,
797
+ "min": 800.0,
798
+ "max": 3400.0
799
+ },
800
+ {
801
+ "current": 2300.0,
802
+ "min": 800.0,
803
+ "max": 3400.0
804
+ },
805
+ {
806
+ "current": 2300.0,
807
+ "min": 800.0,
808
+ "max": 3400.0
809
+ },
810
+ {
811
+ "current": 2300.0,
812
+ "min": 800.0,
813
+ "max": 3400.0
814
+ },
815
+ {
816
+ "current": 2300.0,
817
+ "min": 800.0,
818
+ "max": 3400.0
819
+ },
820
+ {
821
+ "current": 2300.0,
822
+ "min": 800.0,
823
+ "max": 3400.0
824
+ },
825
+ {
826
+ "current": 2300.0,
827
+ "min": 800.0,
828
+ "max": 3400.0
829
+ },
830
+ {
831
+ "current": 2300.0,
832
+ "min": 800.0,
833
+ "max": 3400.0
834
+ },
835
+ {
836
+ "current": 2300.0,
837
+ "min": 800.0,
838
+ "max": 3400.0
839
+ }
840
+ ],
841
+ "disk": {
842
+ "/": {
843
+ "total": 877.6341285705566,
844
+ "used": 211.58670043945312
845
+ }
846
+ },
847
+ "memory": {
848
+ "total": 1007.4379348754883
849
+ }
850
+ }
lm-evaluation-harness/wandb/run-20240523_125300-yqqf3gci/files/wandb-summary.json ADDED
@@ -0,0 +1 @@
1
+ {"_wandb": {"runtime": 12}}
lm-evaluation-harness/wandb/run-20240523_125300-yqqf3gci/logs/debug-internal.log ADDED
@@ -0,0 +1,183 @@
1
+ 2024-05-23 12:53:00,098 INFO StreamThr :804 [internal.py:wandb_internal():85] W&B internal server running at pid: 804, started at: 2024-05-23 12:53:00.094550
2
+ 2024-05-23 12:53:00,100 DEBUG HandlerThread:804 [handler.py:handle_request():158] handle_request: status
3
+ 2024-05-23 12:53:00,103 INFO WriterThread:804 [datastore.py:open_for_write():87] open: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_125300-yqqf3gci/run-yqqf3gci.wandb
4
+ 2024-05-23 12:53:00,105 DEBUG SenderThread:804 [sender.py:send():378] send: header
5
+ 2024-05-23 12:53:00,107 DEBUG SenderThread:804 [sender.py:send():378] send: run
6
+ 2024-05-23 12:53:00,399 INFO SenderThread:804 [dir_watcher.py:__init__():211] watching files in: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_125300-yqqf3gci/files
7
+ 2024-05-23 12:53:00,400 INFO SenderThread:804 [sender.py:_start_run_threads():1123] run started: yqqf3gci with start time 1716468780.094401
8
+ 2024-05-23 12:53:00,406 DEBUG HandlerThread:804 [handler.py:handle_request():158] handle_request: check_version
9
+ 2024-05-23 12:53:00,406 DEBUG SenderThread:804 [sender.py:send_request():405] send_request: check_version
10
+ 2024-05-23 12:53:00,519 DEBUG HandlerThread:804 [handler.py:handle_request():158] handle_request: run_start
11
+ 2024-05-23 12:53:00,522 DEBUG HandlerThread:804 [system_info.py:__init__():26] System info init
12
+ 2024-05-23 12:53:00,522 DEBUG HandlerThread:804 [system_info.py:__init__():41] System info init done
13
+ 2024-05-23 12:53:00,522 INFO HandlerThread:804 [system_monitor.py:start():194] Starting system monitor
14
+ 2024-05-23 12:53:00,522 INFO SystemMonitor:804 [system_monitor.py:_start():158] Starting system asset monitoring threads
15
+ 2024-05-23 12:53:00,522 INFO HandlerThread:804 [system_monitor.py:probe():214] Collecting system info
16
+ 2024-05-23 12:53:00,529 INFO SystemMonitor:804 [interfaces.py:start():188] Started cpu monitoring
17
+ 2024-05-23 12:53:00,529 INFO SystemMonitor:804 [interfaces.py:start():188] Started disk monitoring
18
+ 2024-05-23 12:53:00,530 INFO SystemMonitor:804 [interfaces.py:start():188] Started memory monitoring
19
+ 2024-05-23 12:53:00,532 INFO SystemMonitor:804 [interfaces.py:start():188] Started network monitoring
20
+ 2024-05-23 12:53:00,613 DEBUG HandlerThread:804 [system_info.py:probe():150] Probing system
21
+ 2024-05-23 12:53:00,616 DEBUG HandlerThread:804 [system_info.py:_probe_git():135] Probing git
22
+ 2024-05-23 12:53:00,628 ERROR HandlerThread:804 [gitlib.py:root():92] git root error: Cmd('git') failed due to: exit code(128)
23
+ cmdline: git rev-parse --show-toplevel
24
+ stderr: 'fatal: detected dubious ownership in repository at '/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness'
25
+ To add an exception for this directory, call:
26
+
27
+ git config --global --add safe.directory /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness'
28
+ 2024-05-23 12:53:00,628 DEBUG HandlerThread:804 [system_info.py:_probe_git():143] Probing git done
29
+ 2024-05-23 12:53:00,628 DEBUG HandlerThread:804 [system_info.py:probe():198] Probing system done
30
+ 2024-05-23 12:53:00,628 DEBUG HandlerThread:804 [system_monitor.py:probe():223] {'os': 'Linux-5.15.0-92-generic-x86_64-with-glibc2.35', 'python': '3.10.12', 'heartbeatAt': '2024-05-23T12:53:00.613604', 'startedAt': '2024-05-23T12:53:00.074192', 'docker': None, 'cuda': None, 'args': ('--model', 'hf', '--model_args', 'pretrained=/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step100', '--tasks', 'hellaswag,arc_easy,openbookqa,winogrande,sst2,mrpc', '--batch_size', 'auto', '--wandb_args', 'project=bharatgpt,group=trial_expt_2'), 'state': 'running', 'program': '-m lm_eval.__main__', 'codePathLocal': None, 'git': {'remote': 'https://github.com/EleutherAI/lm-evaluation-harness', 'commit': None}, 'email': None, 'root': '/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness', 'host': 'peacock-evaluation-worker-0', 'username': 'root', 'executable': '/usr/bin/python3', 'cpu_count': 80, 'cpu_count_logical': 160, 'cpu_freq': {'current': 2327.20563125, 'min': 800.0, 'max': 3400.0}, 'cpu_freq_per_core': [{'current': 3399.997, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 
2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 
2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}], 'disk': {'/': {'total': 877.6341285705566, 'used': 211.58670043945312}}, 'memory': {'total': 1007.4379348754883}}
31
+ 2024-05-23 12:53:00,629 INFO HandlerThread:804 [system_monitor.py:probe():224] Finished collecting system info
32
+ 2024-05-23 12:53:00,629 INFO HandlerThread:804 [system_monitor.py:probe():227] Publishing system info
33
+ 2024-05-23 12:53:00,631 INFO HandlerThread:804 [system_monitor.py:probe():229] Finished publishing system info
34
+ 2024-05-23 12:53:00,637 DEBUG SenderThread:804 [sender.py:send():378] send: files
35
+ 2024-05-23 12:53:00,637 INFO SenderThread:804 [sender.py:_save_file():1389] saving file wandb-metadata.json with policy now
36
+ 2024-05-23 12:53:00,822 DEBUG HandlerThread:804 [handler.py:handle_request():158] handle_request: python_packages
37
+ 2024-05-23 12:53:00,823 DEBUG SenderThread:804 [sender.py:send_request():405] send_request: python_packages
38
+ 2024-05-23 12:53:00,823 DEBUG HandlerThread:804 [handler.py:handle_request():158] handle_request: stop_status
39
+ 2024-05-23 12:53:00,824 DEBUG SenderThread:804 [sender.py:send_request():405] send_request: stop_status
40
+ 2024-05-23 12:53:00,970 DEBUG SenderThread:804 [sender.py:send():378] send: telemetry
41
+ 2024-05-23 12:53:01,256 INFO wandb-upload_0:804 [upload_job.py:push():130] Uploaded file /tmp/tmpverirx0vwandb/s0a5dkg5-wandb-metadata.json
42
+ 2024-05-23 12:53:01,403 INFO Thread-12 :804 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_125300-yqqf3gci/files/requirements.txt
43
+ 2024-05-23 12:53:01,403 INFO Thread-12 :804 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_125300-yqqf3gci/files/output.log
44
+ 2024-05-23 12:53:01,403 INFO Thread-12 :804 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_125300-yqqf3gci/files/wandb-metadata.json
45
+ 2024-05-23 12:53:03,403 INFO Thread-12 :804 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_125300-yqqf3gci/files/output.log
46
+ 2024-05-23 12:53:05,972 DEBUG HandlerThread:804 [handler.py:handle_request():158] handle_request: status_report
47
+ 2024-05-23 12:53:11,410 INFO Thread-12 :804 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_125300-yqqf3gci/files/output.log
48
+ 2024-05-23 12:53:11,424 DEBUG HandlerThread:804 [handler.py:handle_request():158] handle_request: status_report
49
+ 2024-05-23 12:53:12,739 DEBUG SenderThread:804 [sender.py:send():378] send: exit
50
+ 2024-05-23 12:53:12,739 INFO SenderThread:804 [sender.py:send_exit():585] handling exit code: 1
51
+ 2024-05-23 12:53:12,739 INFO SenderThread:804 [sender.py:send_exit():587] handling runtime: 12
52
+ 2024-05-23 12:53:12,740 INFO SenderThread:804 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end
53
+ 2024-05-23 12:53:12,740 INFO SenderThread:804 [sender.py:send_exit():593] send defer
54
+ 2024-05-23 12:53:12,741 DEBUG HandlerThread:804 [handler.py:handle_request():158] handle_request: defer
55
+ 2024-05-23 12:53:12,741 INFO HandlerThread:804 [handler.py:handle_request_defer():184] handle defer: 0
56
+ 2024-05-23 12:53:12,741 DEBUG SenderThread:804 [sender.py:send_request():405] send_request: defer
57
+ 2024-05-23 12:53:12,741 INFO SenderThread:804 [sender.py:send_request_defer():609] handle sender defer: 0
58
+ 2024-05-23 12:53:12,741 INFO SenderThread:804 [sender.py:transition_state():613] send defer: 1
59
+ 2024-05-23 12:53:12,741 DEBUG HandlerThread:804 [handler.py:handle_request():158] handle_request: defer
60
+ 2024-05-23 12:53:12,741 INFO HandlerThread:804 [handler.py:handle_request_defer():184] handle defer: 1
61
+ 2024-05-23 12:53:12,741 DEBUG SenderThread:804 [sender.py:send_request():405] send_request: defer
62
+ 2024-05-23 12:53:12,741 INFO SenderThread:804 [sender.py:send_request_defer():609] handle sender defer: 1
63
+ 2024-05-23 12:53:12,741 INFO SenderThread:804 [sender.py:transition_state():613] send defer: 2
64
+ 2024-05-23 12:53:12,741 DEBUG HandlerThread:804 [handler.py:handle_request():158] handle_request: defer
65
+ 2024-05-23 12:53:12,741 INFO HandlerThread:804 [handler.py:handle_request_defer():184] handle defer: 2
66
+ 2024-05-23 12:53:12,741 INFO HandlerThread:804 [system_monitor.py:finish():203] Stopping system monitor
67
+ 2024-05-23 12:53:12,741 DEBUG SystemMonitor:804 [system_monitor.py:_start():172] Starting system metrics aggregation loop
68
+ 2024-05-23 12:53:12,741 DEBUG SystemMonitor:804 [system_monitor.py:_start():179] Finished system metrics aggregation loop
69
+ 2024-05-23 12:53:12,742 DEBUG SystemMonitor:804 [system_monitor.py:_start():183] Publishing last batch of metrics
70
+ 2024-05-23 12:53:12,742 INFO HandlerThread:804 [interfaces.py:finish():200] Joined cpu monitor
71
+ 2024-05-23 12:53:12,745 INFO HandlerThread:804 [interfaces.py:finish():200] Joined disk monitor
72
+ 2024-05-23 12:53:12,745 INFO HandlerThread:804 [interfaces.py:finish():200] Joined memory monitor
73
+ 2024-05-23 12:53:12,745 INFO HandlerThread:804 [interfaces.py:finish():200] Joined network monitor
74
+ 2024-05-23 12:53:12,746 DEBUG SenderThread:804 [sender.py:send_request():405] send_request: defer
75
+ 2024-05-23 12:53:12,746 INFO SenderThread:804 [sender.py:send_request_defer():609] handle sender defer: 2
76
+ 2024-05-23 12:53:12,746 INFO SenderThread:804 [sender.py:transition_state():613] send defer: 3
77
+ 2024-05-23 12:53:12,746 DEBUG SenderThread:804 [sender.py:send():378] send: stats
78
+ 2024-05-23 12:53:12,747 DEBUG HandlerThread:804 [handler.py:handle_request():158] handle_request: defer
79
+ 2024-05-23 12:53:12,747 INFO HandlerThread:804 [handler.py:handle_request_defer():184] handle defer: 3
80
+ 2024-05-23 12:53:12,747 DEBUG SenderThread:804 [sender.py:send_request():405] send_request: defer
81
+ 2024-05-23 12:53:12,747 INFO SenderThread:804 [sender.py:send_request_defer():609] handle sender defer: 3
82
+ 2024-05-23 12:53:12,747 INFO SenderThread:804 [sender.py:transition_state():613] send defer: 4
83
+ 2024-05-23 12:53:12,747 DEBUG HandlerThread:804 [handler.py:handle_request():158] handle_request: defer
84
+ 2024-05-23 12:53:12,747 INFO HandlerThread:804 [handler.py:handle_request_defer():184] handle defer: 4
85
+ 2024-05-23 12:53:12,747 DEBUG SenderThread:804 [sender.py:send_request():405] send_request: defer
86
+ 2024-05-23 12:53:12,747 INFO SenderThread:804 [sender.py:send_request_defer():609] handle sender defer: 4
87
+ 2024-05-23 12:53:12,747 INFO SenderThread:804 [sender.py:transition_state():613] send defer: 5
88
+ 2024-05-23 12:53:12,748 DEBUG HandlerThread:804 [handler.py:handle_request():158] handle_request: defer
89
+ 2024-05-23 12:53:12,748 INFO HandlerThread:804 [handler.py:handle_request_defer():184] handle defer: 5
90
+ 2024-05-23 12:53:12,748 DEBUG SenderThread:804 [sender.py:send():378] send: summary
91
+ 2024-05-23 12:53:12,748 INFO SenderThread:804 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end
92
+ 2024-05-23 12:53:12,749 DEBUG SenderThread:804 [sender.py:send_request():405] send_request: defer
93
+ 2024-05-23 12:53:12,749 INFO SenderThread:804 [sender.py:send_request_defer():609] handle sender defer: 5
94
+ 2024-05-23 12:53:12,749 INFO SenderThread:804 [sender.py:transition_state():613] send defer: 6
95
+ 2024-05-23 12:53:12,749 DEBUG HandlerThread:804 [handler.py:handle_request():158] handle_request: defer
96
+ 2024-05-23 12:53:12,749 INFO HandlerThread:804 [handler.py:handle_request_defer():184] handle defer: 6
97
+ 2024-05-23 12:53:12,749 DEBUG SenderThread:804 [sender.py:send_request():405] send_request: defer
98
+ 2024-05-23 12:53:12,749 INFO SenderThread:804 [sender.py:send_request_defer():609] handle sender defer: 6
99
+ 2024-05-23 12:53:12,754 DEBUG HandlerThread:804 [handler.py:handle_request():158] handle_request: status_report
100
+ 2024-05-23 12:53:12,838 INFO SenderThread:804 [sender.py:transition_state():613] send defer: 7
101
+ 2024-05-23 12:53:12,838 DEBUG HandlerThread:804 [handler.py:handle_request():158] handle_request: defer
102
+ 2024-05-23 12:53:12,838 INFO HandlerThread:804 [handler.py:handle_request_defer():184] handle defer: 7
103
+ 2024-05-23 12:53:12,839 DEBUG SenderThread:804 [sender.py:send_request():405] send_request: defer
104
+ 2024-05-23 12:53:12,839 INFO SenderThread:804 [sender.py:send_request_defer():609] handle sender defer: 7
105
+ 2024-05-23 12:53:13,413 INFO Thread-12 :804 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_125300-yqqf3gci/files/config.yaml
106
+ 2024-05-23 12:53:13,413 INFO Thread-12 :804 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_125300-yqqf3gci/files/wandb-summary.json
107
+ 2024-05-23 12:53:13,739 DEBUG HandlerThread:804 [handler.py:handle_request():158] handle_request: poll_exit
108
+ 2024-05-23 12:53:14,996 INFO SenderThread:804 [sender.py:transition_state():613] send defer: 8
109
+ 2024-05-23 12:53:14,996 DEBUG SenderThread:804 [sender.py:send_request():405] send_request: poll_exit
110
+ 2024-05-23 12:53:14,997 DEBUG HandlerThread:804 [handler.py:handle_request():158] handle_request: defer
111
+ 2024-05-23 12:53:14,997 INFO HandlerThread:804 [handler.py:handle_request_defer():184] handle defer: 8
112
+ 2024-05-23 12:53:14,997 DEBUG SenderThread:804 [sender.py:send_request():405] send_request: defer
113
+ 2024-05-23 12:53:14,997 INFO SenderThread:804 [sender.py:send_request_defer():609] handle sender defer: 8
114
+ 2024-05-23 12:53:14,997 INFO SenderThread:804 [job_builder.py:build():432] Attempting to build job artifact
115
+ 2024-05-23 12:53:14,997 INFO SenderThread:804 [job_builder.py:_get_source_type():576] no source found
116
+ 2024-05-23 12:53:14,998 INFO SenderThread:804 [sender.py:transition_state():613] send defer: 9
117
+ 2024-05-23 12:53:14,998 DEBUG HandlerThread:804 [handler.py:handle_request():158] handle_request: defer
118
+ 2024-05-23 12:53:14,998 INFO HandlerThread:804 [handler.py:handle_request_defer():184] handle defer: 9
119
+ 2024-05-23 12:53:14,998 DEBUG SenderThread:804 [sender.py:send_request():405] send_request: defer
120
+ 2024-05-23 12:53:14,998 INFO SenderThread:804 [sender.py:send_request_defer():609] handle sender defer: 9
121
+ 2024-05-23 12:53:14,998 INFO SenderThread:804 [dir_watcher.py:finish():358] shutting down directory watcher
122
+ 2024-05-23 12:53:15,415 INFO SenderThread:804 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_125300-yqqf3gci/files/output.log
123
+ 2024-05-23 12:53:15,415 INFO SenderThread:804 [dir_watcher.py:finish():388] scan: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_125300-yqqf3gci/files
124
+ 2024-05-23 12:53:15,415 INFO SenderThread:804 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_125300-yqqf3gci/files/config.yaml config.yaml
125
+ 2024-05-23 12:53:15,416 INFO SenderThread:804 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_125300-yqqf3gci/files/output.log output.log
126
+ 2024-05-23 12:53:15,418 INFO SenderThread:804 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_125300-yqqf3gci/files/requirements.txt requirements.txt
127
+ 2024-05-23 12:53:15,418 INFO SenderThread:804 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_125300-yqqf3gci/files/wandb-metadata.json wandb-metadata.json
128
+ 2024-05-23 12:53:15,418 INFO SenderThread:804 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_125300-yqqf3gci/files/wandb-summary.json wandb-summary.json
129
+ 2024-05-23 12:53:15,419 INFO SenderThread:804 [sender.py:transition_state():613] send defer: 10
130
+ 2024-05-23 12:53:15,419 DEBUG HandlerThread:804 [handler.py:handle_request():158] handle_request: defer
131
+ 2024-05-23 12:53:15,419 INFO HandlerThread:804 [handler.py:handle_request_defer():184] handle defer: 10
132
+ 2024-05-23 12:53:15,419 DEBUG SenderThread:804 [sender.py:send_request():405] send_request: defer
133
+ 2024-05-23 12:53:15,419 INFO SenderThread:804 [sender.py:send_request_defer():609] handle sender defer: 10
134
+ 2024-05-23 12:53:15,419 INFO SenderThread:804 [file_pusher.py:finish():169] shutting down file pusher
135
+ 2024-05-23 12:53:15,662 INFO wandb-upload_0:804 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_125300-yqqf3gci/files/config.yaml
136
+ 2024-05-23 12:53:15,739 DEBUG HandlerThread:804 [handler.py:handle_request():158] handle_request: poll_exit
137
+ 2024-05-23 12:53:15,740 DEBUG SenderThread:804 [sender.py:send_request():405] send_request: poll_exit
138
+ 2024-05-23 12:53:15,986 INFO wandb-upload_3:804 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_125300-yqqf3gci/files/wandb-summary.json
139
+ 2024-05-23 12:53:16,023 INFO wandb-upload_1:804 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_125300-yqqf3gci/files/output.log
140
+ 2024-05-23 12:53:16,031 INFO wandb-upload_2:804 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_125300-yqqf3gci/files/requirements.txt
141
+ 2024-05-23 12:53:16,232 INFO Thread-11 (_thread_body):804 [sender.py:transition_state():613] send defer: 11
142
+ 2024-05-23 12:53:16,232 DEBUG HandlerThread:804 [handler.py:handle_request():158] handle_request: defer
143
+ 2024-05-23 12:53:16,232 INFO HandlerThread:804 [handler.py:handle_request_defer():184] handle defer: 11
144
+ 2024-05-23 12:53:16,232 DEBUG SenderThread:804 [sender.py:send_request():405] send_request: defer
145
+ 2024-05-23 12:53:16,232 INFO SenderThread:804 [sender.py:send_request_defer():609] handle sender defer: 11
146
+ 2024-05-23 12:53:16,232 INFO SenderThread:804 [file_pusher.py:join():175] waiting for file pusher
147
+ 2024-05-23 12:53:16,232 INFO SenderThread:804 [sender.py:transition_state():613] send defer: 12
148
+ 2024-05-23 12:53:16,233 DEBUG HandlerThread:804 [handler.py:handle_request():158] handle_request: defer
149
+ 2024-05-23 12:53:16,233 INFO HandlerThread:804 [handler.py:handle_request_defer():184] handle defer: 12
150
+ 2024-05-23 12:53:16,233 DEBUG SenderThread:804 [sender.py:send_request():405] send_request: defer
151
+ 2024-05-23 12:53:16,233 INFO SenderThread:804 [sender.py:send_request_defer():609] handle sender defer: 12
152
+ 2024-05-23 12:53:16,233 INFO SenderThread:804 [file_stream.py:finish():601] file stream finish called
153
+ 2024-05-23 12:53:16,443 INFO SenderThread:804 [file_stream.py:finish():605] file stream finish is done
154
+ 2024-05-23 12:53:16,443 INFO SenderThread:804 [sender.py:transition_state():613] send defer: 13
155
+ 2024-05-23 12:53:16,443 DEBUG HandlerThread:804 [handler.py:handle_request():158] handle_request: defer
156
+ 2024-05-23 12:53:16,443 INFO HandlerThread:804 [handler.py:handle_request_defer():184] handle defer: 13
157
+ 2024-05-23 12:53:16,443 DEBUG SenderThread:804 [sender.py:send_request():405] send_request: defer
158
+ 2024-05-23 12:53:16,443 INFO SenderThread:804 [sender.py:send_request_defer():609] handle sender defer: 13
159
+ 2024-05-23 12:53:16,443 INFO SenderThread:804 [sender.py:transition_state():613] send defer: 14
160
+ 2024-05-23 12:53:16,443 DEBUG HandlerThread:804 [handler.py:handle_request():158] handle_request: defer
161
+ 2024-05-23 12:53:16,443 INFO HandlerThread:804 [handler.py:handle_request_defer():184] handle defer: 14
162
+ 2024-05-23 12:53:16,444 DEBUG SenderThread:804 [sender.py:send():378] send: final
163
+ 2024-05-23 12:53:16,444 DEBUG SenderThread:804 [sender.py:send():378] send: footer
164
+ 2024-05-23 12:53:16,444 DEBUG SenderThread:804 [sender.py:send_request():405] send_request: defer
165
+ 2024-05-23 12:53:16,444 INFO SenderThread:804 [sender.py:send_request_defer():609] handle sender defer: 14
166
+ 2024-05-23 12:53:16,444 DEBUG HandlerThread:804 [handler.py:handle_request():158] handle_request: poll_exit
167
+ 2024-05-23 12:53:16,444 DEBUG SenderThread:804 [sender.py:send_request():405] send_request: poll_exit
168
+ 2024-05-23 12:53:16,445 DEBUG HandlerThread:804 [handler.py:handle_request():158] handle_request: poll_exit
169
+ 2024-05-23 12:53:16,445 DEBUG HandlerThread:804 [handler.py:handle_request():158] handle_request: server_info
170
+ 2024-05-23 12:53:16,445 DEBUG HandlerThread:804 [handler.py:handle_request():158] handle_request: get_summary
171
+ 2024-05-23 12:53:16,445 DEBUG HandlerThread:804 [handler.py:handle_request():158] handle_request: sampled_history
172
+ 2024-05-23 12:53:16,445 DEBUG HandlerThread:804 [handler.py:handle_request():158] handle_request: internal_messages
173
+ 2024-05-23 12:53:16,445 DEBUG SenderThread:804 [sender.py:send_request():405] send_request: poll_exit
174
+ 2024-05-23 12:53:16,446 DEBUG SenderThread:804 [sender.py:send_request():405] send_request: server_info
175
+ 2024-05-23 12:53:16,507 INFO MainThread:804 [wandb_run.py:_footer_history_summary_info():3994] rendering history
176
+ 2024-05-23 12:53:16,507 INFO MainThread:804 [wandb_run.py:_footer_history_summary_info():4026] rendering summary
177
+ 2024-05-23 12:53:16,507 INFO MainThread:804 [wandb_run.py:_footer_sync_info():3953] logging synced files
178
+ 2024-05-23 12:53:16,508 DEBUG HandlerThread:804 [handler.py:handle_request():158] handle_request: shutdown
179
+ 2024-05-23 12:53:16,508 INFO HandlerThread:804 [handler.py:finish():882] shutting down handler
180
+ 2024-05-23 12:53:17,445 INFO WriterThread:804 [datastore.py:close():296] close: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_125300-yqqf3gci/run-yqqf3gci.wandb
181
+ 2024-05-23 12:53:17,507 INFO SenderThread:804 [sender.py:finish():1545] shutting down sender
182
+ 2024-05-23 12:53:17,507 INFO SenderThread:804 [file_pusher.py:finish():169] shutting down file pusher
183
+ 2024-05-23 12:53:17,507 INFO SenderThread:804 [file_pusher.py:join():175] waiting for file pusher
lm-evaluation-harness/wandb/run-20240523_125300-yqqf3gci/logs/debug.log ADDED
@@ -0,0 +1,29 @@
1
+ 2024-05-23 12:53:00,089 INFO MainThread:648 [wandb_setup.py:_flush():76] Current SDK version is 0.17.0
2
+ 2024-05-23 12:53:00,089 INFO MainThread:648 [wandb_setup.py:_flush():76] Configure stats pid to 648
3
+ 2024-05-23 12:53:00,089 INFO MainThread:648 [wandb_setup.py:_flush():76] Loading settings from /root/.config/wandb/settings
4
+ 2024-05-23 12:53:00,089 INFO MainThread:648 [wandb_setup.py:_flush():76] Loading settings from /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/settings
5
+ 2024-05-23 12:53:00,089 INFO MainThread:648 [wandb_setup.py:_flush():76] Loading settings from environment variables: {}
6
+ 2024-05-23 12:53:00,089 INFO MainThread:648 [wandb_setup.py:_flush():76] Applying setup settings: {'_disable_service': False}
7
+ 2024-05-23 12:53:00,089 WARNING MainThread:648 [wandb_setup.py:_flush():76] Could not find program at -m lm_eval.__main__
8
+ 2024-05-23 12:53:00,089 INFO MainThread:648 [wandb_setup.py:_flush():76] Inferring run settings from compute environment: {'program_relpath': None, 'program': '-m lm_eval.__main__'}
9
+ 2024-05-23 12:53:00,089 INFO MainThread:648 [wandb_setup.py:_flush():76] Applying login settings: {}
10
+ 2024-05-23 12:53:00,089 INFO MainThread:648 [wandb_init.py:_log_setup():520] Logging user logs to /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_125300-yqqf3gci/logs/debug.log
11
+ 2024-05-23 12:53:00,089 INFO MainThread:648 [wandb_init.py:_log_setup():521] Logging internal logs to /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_125300-yqqf3gci/logs/debug-internal.log
12
+ 2024-05-23 12:53:00,089 INFO MainThread:648 [wandb_init.py:init():560] calling init triggers
13
+ 2024-05-23 12:53:00,090 INFO MainThread:648 [wandb_init.py:init():567] wandb.init called with sweep_config: {}
14
+ config: {}
15
+ 2024-05-23 12:53:00,090 INFO MainThread:648 [wandb_init.py:init():610] starting backend
16
+ 2024-05-23 12:53:00,090 INFO MainThread:648 [wandb_init.py:init():614] setting up manager
17
+ 2024-05-23 12:53:00,093 INFO MainThread:648 [backend.py:_multiprocessing_setup():105] multiprocessing start_methods=fork,spawn,forkserver, using: spawn
18
+ 2024-05-23 12:53:00,094 INFO MainThread:648 [wandb_init.py:init():622] backend started and connected
19
+ 2024-05-23 12:53:00,097 INFO MainThread:648 [wandb_init.py:init():711] updated telemetry
20
+ 2024-05-23 12:53:00,107 INFO MainThread:648 [wandb_init.py:init():744] communicating run to backend with 90.0 second timeout
21
+ 2024-05-23 12:53:00,405 INFO MainThread:648 [wandb_run.py:_on_init():2396] communicating current version
22
+ 2024-05-23 12:53:00,513 INFO MainThread:648 [wandb_run.py:_on_init():2405] got version response
23
+ 2024-05-23 12:53:00,513 INFO MainThread:648 [wandb_init.py:init():795] starting run threads in backend
24
+ 2024-05-23 12:53:00,824 INFO MainThread:648 [wandb_run.py:_console_start():2374] atexit reg
25
+ 2024-05-23 12:53:00,824 INFO MainThread:648 [wandb_run.py:_redirect():2229] redirect: wrap_raw
26
+ 2024-05-23 12:53:00,824 INFO MainThread:648 [wandb_run.py:_redirect():2294] Wrapping output streams.
27
+ 2024-05-23 12:53:00,824 INFO MainThread:648 [wandb_run.py:_redirect():2319] Redirects installed.
28
+ 2024-05-23 12:53:00,826 INFO MainThread:648 [wandb_init.py:init():838] run started, returning control to user process
29
+ 2024-05-23 12:53:17,509 WARNING MsgRouterThr:648 [router.py:message_loop():77] message_loop has been closed
lm-evaluation-harness/wandb/run-20240523_125300-yqqf3gci/run-yqqf3gci.wandb ADDED
Binary file (11.2 kB). View file
 
lm-evaluation-harness/wandb/run-20240530_070447-fi4sos5j/files/config.yaml ADDED
@@ -0,0 +1,44 @@
1
+ wandb_version: 1
2
+
3
+ _wandb:
4
+ desc: null
5
+ value:
6
+ python_version: 3.10.12
7
+ cli_version: 0.17.0
8
+ framework: huggingface
9
+ huggingface_version: 4.36.2
10
+ is_jupyter_run: false
11
+ is_kaggle_kernel: false
12
+ start_time: 1717052687
13
+ t:
14
+ 1:
15
+ - 1
16
+ - 5
17
+ - 11
18
+ - 49
19
+ - 51
20
+ - 53
21
+ - 55
22
+ - 71
23
+ - 98
24
+ - 100
25
+ 2:
26
+ - 1
27
+ - 5
28
+ - 11
29
+ - 49
30
+ - 51
31
+ - 53
32
+ - 55
33
+ - 71
34
+ - 98
35
+ - 100
36
+ 3:
37
+ - 13
38
+ - 23
39
+ 4: 3.10.12
40
+ 5: 0.17.0
41
+ 6: 4.36.2
42
+ 8:
43
+ - 5
44
+ 13: linux-x86_64
lm-evaluation-harness/wandb/run-20240530_070447-fi4sos5j/files/output.log ADDED
@@ -0,0 +1,38 @@
1
+
2
+ 2024-05-30:07:04:48,090 INFO [__main__.py:251] Verbosity set to INFO
3
+ 2024-05-30:07:04:57,969 INFO [__main__.py:335] Selected Tasks: ['arc_easy', 'boolq', 'copa', 'mrpc', 'piqa', 'sst2', 'winogrande']
4
+ 2024-05-30:07:04:57,971 INFO [evaluator.py:131] Setting random seed to 0 | Setting numpy seed to 1234 | Setting torch manual seed to 1234
5
+ 2024-05-30:07:04:57,971 INFO [evaluator.py:177] Initializing hf model, with arguments: {'pretrained': '/mnt/weka/peacock/experiments/llama/eval/checkpoint-english/llamav2-3b/hf/global_step70000', 'tokenizer': '/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k/'}
6
+ 2024-05-30:07:05:00,271 INFO [huggingface.py:164] Using device 'cuda'
7
+ /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/gpu_migration/torch/cuda/memory.py:36: UserWarning: No need to call empty_cache on HPU. It manages the memory internally in an effcient way.
8
+ warnings.warn(
9
+ Traceback (most recent call last):
10
+ File "/usr/lib/python3.10/runpy.py", line 196, in _run_module_as_main
11
+ return _run_code(code, main_globals, None,
12
+ File "/usr/lib/python3.10/runpy.py", line 86, in _run_code
13
+ exec(code, run_globals)
14
+ File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 417, in <module>
15
+ cli_evaluate()
16
+ File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 341, in cli_evaluate
17
+ results = evaluator.simple_evaluate(
18
+ File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/utils.py", line 288, in _wrapper
19
+ return fn(*args, **kwargs)
20
+ File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/evaluator.py", line 180, in simple_evaluate
21
+ lm = lm_eval.api.registry.get_model(model).create_from_arg_string(
22
+ File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/api/model.py", line 134, in create_from_arg_string
23
+ return cls(**args, **args2)
24
+ File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 237, in __init__
25
+ self._create_tokenizer(
26
+ File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 630, in _create_tokenizer
27
+ self.tokenizer = transformers.AutoTokenizer.from_pretrained(
28
+ File "/usr/local/lib/python3.10/dist-packages/transformers/models/auto/tokenization_auto.py", line 752, in from_pretrained
29
+ config = AutoConfig.from_pretrained(
30
+ File "/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py", line 1082, in from_pretrained
31
+ config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs)
32
+ File "/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 644, in get_config_dict
33
+ config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs)
34
+ File "/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 699, in _get_config_dict
35
+ resolved_config_file = cached_file(
36
+ File "/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py", line 360, in cached_file
37
+ raise EnvironmentError(
38
+ OSError: /mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k/ does not appear to have a file named config.json. Checkout 'https://huggingface.co//mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k//main' for available files.
lm-evaluation-harness/wandb/run-20240530_070447-fi4sos5j/files/wandb-metadata.json ADDED
@@ -0,0 +1,850 @@
+ {
+ "os": "Linux-5.15.0-92-generic-x86_64-with-glibc2.35",
+ "python": "3.10.12",
+ "heartbeatAt": "2024-05-30T07:04:47.890522",
+ "startedAt": "2024-05-30T07:04:47.365134",
+ "docker": null,
+ "cuda": null,
+ "args": [
+ "--model",
+ "hf",
+ "--model_args",
+ "pretrained=/mnt/weka/peacock/experiments/llama/eval/checkpoint-english/llamav2-3b/hf/global_step70000,tokenizer=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k/",
+ "--tasks",
+ "winogrande,sst2,mrpc,arc_easy,copa,piqa,boolq",
+ "--batch_size",
+ "auto",
+ "--wandb_args",
+ "project=english-eval,group=exp2,name=global_step70000"
+ ],
+ "state": "running",
+ "program": "-m lm_eval.__main__",
+ "codePathLocal": null,
+ "git": {
+ "remote": "https://github.com/EleutherAI/lm-evaluation-harness",
+ "commit": null
+ },
+ "email": null,
+ "root": "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness",
+ "host": "peacock-evaluation-debug-35-worker-0",
+ "username": "root",
+ "executable": "/usr/bin/python3",
+ "cpu_count": 80,
+ "cpu_count_logical": 160,
+ "cpu_freq": {
+ "current": 2327.4101375,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ "cpu_freq_per_core": [
+ {"current": 3399.997, "min": 800.0, "max": 3400.0},
+ {"current": 3400.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 3399.997, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 3400.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0}
+ ],
+ "disk": {
+ "/": {
+ "total": 877.6341285705566,
+ "used": 211.89148712158203
+ }
+ },
+ "memory": {
+ "total": 1007.4379920959473
+ }
+ }
lm-evaluation-harness/wandb/run-20240530_070447-fi4sos5j/logs/debug-internal.log ADDED
@@ -0,0 +1,196 @@
+ 2024-05-30 07:04:47,387 INFO StreamThr :900 [internal.py:wandb_internal():85] W&B internal server running at pid: 900, started at: 2024-05-30 07:04:47.385483
+ 2024-05-30 07:04:47,391 DEBUG HandlerThread:900 [handler.py:handle_request():158] handle_request: status
+ 2024-05-30 07:04:47,392 INFO WriterThread:900 [datastore.py:open_for_write():87] open: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_070447-fi4sos5j/run-fi4sos5j.wandb
+ 2024-05-30 07:04:47,395 DEBUG SenderThread:900 [sender.py:send():378] send: header
+ 2024-05-30 07:04:47,399 DEBUG SenderThread:900 [sender.py:send():378] send: run
+ 2024-05-30 07:04:47,689 INFO SenderThread:900 [dir_watcher.py:__init__():211] watching files in: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_070447-fi4sos5j/files
+ 2024-05-30 07:04:47,689 INFO SenderThread:900 [sender.py:_start_run_threads():1123] run started: fi4sos5j with start time 1717052687.38595
+ 2024-05-30 07:04:47,695 DEBUG HandlerThread:900 [handler.py:handle_request():158] handle_request: check_version
+ 2024-05-30 07:04:47,696 DEBUG SenderThread:900 [sender.py:send_request():405] send_request: check_version
+ 2024-05-30 07:04:47,815 DEBUG HandlerThread:900 [handler.py:handle_request():158] handle_request: run_start
+ 2024-05-30 07:04:47,817 DEBUG HandlerThread:900 [system_info.py:__init__():26] System info init
+ 2024-05-30 07:04:47,817 DEBUG HandlerThread:900 [system_info.py:__init__():41] System info init done
+ 2024-05-30 07:04:47,817 INFO HandlerThread:900 [system_monitor.py:start():194] Starting system monitor
+ 2024-05-30 07:04:47,818 INFO SystemMonitor:900 [system_monitor.py:_start():158] Starting system asset monitoring threads
+ 2024-05-30 07:04:47,818 INFO HandlerThread:900 [system_monitor.py:probe():214] Collecting system info
+ 2024-05-30 07:04:47,824 INFO SystemMonitor:900 [interfaces.py:start():188] Started cpu monitoring
+ 2024-05-30 07:04:47,825 INFO SystemMonitor:900 [interfaces.py:start():188] Started disk monitoring
+ 2024-05-30 07:04:47,828 INFO SystemMonitor:900 [interfaces.py:start():188] Started memory monitoring
+ 2024-05-30 07:04:47,828 INFO SystemMonitor:900 [interfaces.py:start():188] Started network monitoring
+ 2024-05-30 07:04:47,890 DEBUG HandlerThread:900 [system_info.py:probe():150] Probing system
+ 2024-05-30 07:04:47,893 DEBUG HandlerThread:900 [system_info.py:_probe_git():135] Probing git
+ 2024-05-30 07:04:47,903 ERROR HandlerThread:900 [gitlib.py:root():92] git root error: Cmd('git') failed due to: exit code(128)
+ cmdline: git rev-parse --show-toplevel
+ stderr: 'fatal: detected dubious ownership in repository at '/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness'
+ To add an exception for this directory, call:
+
+ git config --global --add safe.directory /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness'
+ 2024-05-30 07:04:47,903 DEBUG HandlerThread:900 [system_info.py:_probe_git():143] Probing git done
+ 2024-05-30 07:04:47,903 DEBUG HandlerThread:900 [system_info.py:probe():198] Probing system done
+ 2024-05-30 07:04:47,903 DEBUG HandlerThread:900 [system_monitor.py:probe():223] {'os': 'Linux-5.15.0-92-generic-x86_64-with-glibc2.35', 'python': '3.10.12', 'heartbeatAt': '2024-05-30T07:04:47.890522', 'startedAt': '2024-05-30T07:04:47.365134', 'docker': None, 'cuda': None, 'args': ('--model', 'hf', '--model_args', 'pretrained=/mnt/weka/peacock/experiments/llama/eval/checkpoint-english/llamav2-3b/hf/global_step70000,tokenizer=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k/', '--tasks', 'winogrande,sst2,mrpc,arc_easy,copa,piqa,boolq', '--batch_size', 'auto', '--wandb_args', 'project=english-eval,group=exp2,name=global_step70000'), 'state': 'running', 'program': '-m lm_eval.__main__', 'codePathLocal': None, 'git': {'remote': 'https://github.com/EleutherAI/lm-evaluation-harness', 'commit': None}, 'email': None, 'root': '/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness', 'host': 'peacock-evaluation-debug-35-worker-0', 'username': 'root', 'executable': '/usr/bin/python3', 'cpu_count': 80, 'cpu_count_logical': 160, 'cpu_freq': {'current': 2327.4101375, 'min': 800.0, 'max': 3400.0}, 'cpu_freq_per_core': [{'current': 3399.997, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3399.997, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 
2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 
2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}], 'disk': {'/': {'total': 877.6341285705566, 'used': 211.89148712158203}}, 'memory': {'total': 1007.4379920959473}}
+ 2024-05-30 07:04:47,903 INFO HandlerThread:900 [system_monitor.py:probe():224] Finished collecting system info
+ 2024-05-30 07:04:47,903 INFO HandlerThread:900 [system_monitor.py:probe():227] Publishing system info
+ 2024-05-30 07:04:47,906 INFO HandlerThread:900 [system_monitor.py:probe():229] Finished publishing system info
+ 2024-05-30 07:04:47,913 DEBUG SenderThread:900 [sender.py:send():378] send: files
+ 2024-05-30 07:04:47,913 INFO SenderThread:900 [sender.py:_save_file():1389] saving file wandb-metadata.json with policy now
+ 2024-05-30 07:04:48,084 DEBUG HandlerThread:900 [handler.py:handle_request():158] handle_request: python_packages
+ 2024-05-30 07:04:48,084 DEBUG SenderThread:900 [sender.py:send_request():405] send_request: python_packages
+ 2024-05-30 07:04:48,085 DEBUG HandlerThread:900 [handler.py:handle_request():158] handle_request: stop_status
+ 2024-05-30 07:04:48,090 DEBUG SenderThread:900 [sender.py:send_request():405] send_request: stop_status
+ 2024-05-30 07:04:48,246 DEBUG SenderThread:900 [sender.py:send():378] send: telemetry
+ 2024-05-30 07:04:48,551 INFO wandb-upload_0:900 [upload_job.py:push():130] Uploaded file /tmp/tmpg7_ujvdqwandb/75nr9en3-wandb-metadata.json
+ 2024-05-30 07:04:48,692 INFO Thread-12 :900 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_070447-fi4sos5j/files/output.log
+ 2024-05-30 07:04:48,692 INFO Thread-12 :900 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_070447-fi4sos5j/files/wandb-metadata.json
+ 2024-05-30 07:04:48,692 INFO Thread-12 :900 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_070447-fi4sos5j/files/requirements.txt
+ 2024-05-30 07:04:50,691 INFO Thread-12 :900 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_070447-fi4sos5j/files/output.log
+ 2024-05-30 07:04:53,250 DEBUG HandlerThread:900 [handler.py:handle_request():158] handle_request: status_report
+ 2024-05-30 07:04:58,696 INFO Thread-12 :900 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_070447-fi4sos5j/files/output.log
+ 2024-05-30 07:04:58,972 DEBUG HandlerThread:900 [handler.py:handle_request():158] handle_request: status_report
+ 2024-05-30 07:05:02,714 INFO Thread-12 :900 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_070447-fi4sos5j/files/output.log
+ 2024-05-30 07:05:03,086 DEBUG HandlerThread:900 [handler.py:handle_request():158] handle_request: stop_status
+ 2024-05-30 07:05:03,086 DEBUG SenderThread:900 [sender.py:send_request():405] send_request: stop_status
+ 2024-05-30 07:05:04,190 DEBUG HandlerThread:900 [handler.py:handle_request():158] handle_request: status_report
+ 2024-05-30 07:05:09,190 DEBUG HandlerThread:900 [handler.py:handle_request():158] handle_request: status_report
+ 2024-05-30 07:05:14,191 DEBUG HandlerThread:900 [handler.py:handle_request():158] handle_request: status_report
+ 2024-05-30 07:05:18,086 DEBUG HandlerThread:900 [handler.py:handle_request():158] handle_request: stop_status
+ 2024-05-30 07:05:18,087 DEBUG SenderThread:900 [sender.py:send_request():405] send_request: stop_status
+ 2024-05-30 07:05:19,247 DEBUG HandlerThread:900 [handler.py:handle_request():158] handle_request: status_report
+ 2024-05-30 07:05:19,759 INFO Thread-12 :900 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_070447-fi4sos5j/files/config.yaml
+ 2024-05-30 07:05:25,032 DEBUG HandlerThread:900 [handler.py:handle_request():158] handle_request: status_report
+ 2024-05-30 07:05:26,573 INFO Thread-12 :900 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_070447-fi4sos5j/files/output.log
+ 2024-05-30 07:05:26,962 DEBUG SenderThread:900 [sender.py:send():378] send: exit
+ 2024-05-30 07:05:26,962 INFO SenderThread:900 [sender.py:send_exit():585] handling exit code: 1
+ 2024-05-30 07:05:26,962 INFO SenderThread:900 [sender.py:send_exit():587] handling runtime: 39
+ 2024-05-30 07:05:26,963 INFO SenderThread:900 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end
+ 2024-05-30 07:05:26,963 INFO SenderThread:900 [sender.py:send_exit():593] send defer
+ 2024-05-30 07:05:26,964 DEBUG HandlerThread:900 [handler.py:handle_request():158] handle_request: defer
+ 2024-05-30 07:05:26,964 INFO HandlerThread:900 [handler.py:handle_request_defer():184] handle defer: 0
+ 2024-05-30 07:05:26,964 DEBUG SenderThread:900 [sender.py:send_request():405] send_request: defer
+ 2024-05-30 07:05:26,964 INFO SenderThread:900 [sender.py:send_request_defer():609] handle sender defer: 0
+ 2024-05-30 07:05:26,964 INFO SenderThread:900 [sender.py:transition_state():613] send defer: 1
+ 2024-05-30 07:05:26,964 DEBUG HandlerThread:900 [handler.py:handle_request():158] handle_request: defer
+ 2024-05-30 07:05:26,964 INFO HandlerThread:900 [handler.py:handle_request_defer():184] handle defer: 1
+ 2024-05-30 07:05:26,964 DEBUG SenderThread:900 [sender.py:send_request():405] send_request: defer
+ 2024-05-30 07:05:26,964 INFO SenderThread:900 [sender.py:send_request_defer():609] handle sender defer: 1
+ 2024-05-30 07:05:26,964 INFO SenderThread:900 [sender.py:transition_state():613] send defer: 2
+ 2024-05-30 07:05:26,964 DEBUG HandlerThread:900 [handler.py:handle_request():158] handle_request: defer
+ 2024-05-30 07:05:26,964 INFO HandlerThread:900 [handler.py:handle_request_defer():184] handle defer: 2
+ 2024-05-30 07:05:26,964 INFO HandlerThread:900 [system_monitor.py:finish():203] Stopping system monitor
+ 2024-05-30 07:05:26,964 DEBUG SystemMonitor:900 [system_monitor.py:_start():172] Starting system metrics aggregation loop
+ 2024-05-30 07:05:26,964 DEBUG SystemMonitor:900 [system_monitor.py:_start():179] Finished system metrics aggregation loop
+ 2024-05-30 07:05:26,964 DEBUG SystemMonitor:900 [system_monitor.py:_start():183] Publishing last batch of metrics
+ 2024-05-30 07:05:26,967 INFO HandlerThread:900 [interfaces.py:finish():200] Joined cpu monitor
+ 2024-05-30 07:05:26,968 INFO HandlerThread:900 [interfaces.py:finish():200] Joined disk monitor
+ 2024-05-30 07:05:26,968 INFO HandlerThread:900 [interfaces.py:finish():200] Joined memory monitor
+ 2024-05-30 07:05:26,968 INFO HandlerThread:900 [interfaces.py:finish():200] Joined network monitor
+ 2024-05-30 07:05:26,968 DEBUG SenderThread:900 [sender.py:send_request():405] send_request: defer
+ 2024-05-30 07:05:26,968 INFO SenderThread:900 [sender.py:send_request_defer():609] handle sender defer: 2
+ 2024-05-30 07:05:26,968 INFO SenderThread:900 [sender.py:transition_state():613] send defer: 3
+ 2024-05-30 07:05:26,968 DEBUG SenderThread:900 [sender.py:send():378] send: stats
+ 2024-05-30 07:05:26,969 DEBUG HandlerThread:900 [handler.py:handle_request():158] handle_request: defer
+ 2024-05-30 07:05:26,969 INFO HandlerThread:900 [handler.py:handle_request_defer():184] handle defer: 3
+ 2024-05-30 07:05:26,970 DEBUG SenderThread:900 [sender.py:send_request():405] send_request: defer
+ 2024-05-30 07:05:26,970 INFO SenderThread:900 [sender.py:send_request_defer():609] handle sender defer: 3
+ 2024-05-30 07:05:26,970 INFO SenderThread:900 [sender.py:transition_state():613] send defer: 4
+ 2024-05-30 07:05:26,970 DEBUG HandlerThread:900 [handler.py:handle_request():158] handle_request: defer
+ 2024-05-30 07:05:26,970 INFO HandlerThread:900 [handler.py:handle_request_defer():184] handle defer: 4
+ 2024-05-30 07:05:26,970 DEBUG SenderThread:900 [sender.py:send_request():405] send_request: defer
+ 2024-05-30 07:05:26,970 INFO SenderThread:900 [sender.py:send_request_defer():609] handle sender defer: 4
+ 2024-05-30 07:05:26,970 INFO SenderThread:900 [sender.py:transition_state():613] send defer: 5
+ 2024-05-30 07:05:26,970 DEBUG HandlerThread:900 [handler.py:handle_request():158] handle_request: defer
+ 2024-05-30 07:05:26,970 INFO HandlerThread:900 [handler.py:handle_request_defer():184] handle defer: 5
+ 2024-05-30 07:05:26,970 DEBUG SenderThread:900 [sender.py:send():378] send: summary
+ 2024-05-30 07:05:26,971 INFO SenderThread:900 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end
+ 2024-05-30 07:05:26,971 DEBUG SenderThread:900 [sender.py:send_request():405] send_request: defer
+ 2024-05-30 07:05:26,971 INFO SenderThread:900 [sender.py:send_request_defer():609] handle sender defer: 5
+ 2024-05-30 07:05:26,971 INFO SenderThread:900 [sender.py:transition_state():613] send defer: 6
+ 2024-05-30 07:05:26,971 DEBUG HandlerThread:900 [handler.py:handle_request():158] handle_request: defer
+ 2024-05-30 07:05:26,971 INFO HandlerThread:900 [handler.py:handle_request_defer():184] handle defer: 6
+ 2024-05-30 07:05:26,971 DEBUG SenderThread:900 [sender.py:send_request():405] send_request: defer
+ 2024-05-30 07:05:26,971 INFO SenderThread:900 [sender.py:send_request_defer():609] handle sender defer: 6
+ 2024-05-30 07:05:26,971 INFO SenderThread:900 [sender.py:transition_state():613] send defer: 7
+ 2024-05-30 07:05:26,972 DEBUG HandlerThread:900 [handler.py:handle_request():158] handle_request: status_report
+ 2024-05-30 07:05:26,972 DEBUG HandlerThread:900 [handler.py:handle_request():158] handle_request: defer
+ 2024-05-30 07:05:26,972 INFO HandlerThread:900 [handler.py:handle_request_defer():184] handle defer: 7
+ 2024-05-30 07:05:26,972 DEBUG SenderThread:900 [sender.py:send_request():405] send_request: defer
+ 2024-05-30 07:05:26,972 INFO SenderThread:900 [sender.py:send_request_defer():609] handle sender defer: 7
+ 2024-05-30 07:05:27,574 INFO Thread-12 :900 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_070447-fi4sos5j/files/wandb-summary.json
+ 2024-05-30 07:05:27,962 DEBUG HandlerThread:900 [handler.py:handle_request():158] handle_request: poll_exit
+ 2024-05-30 07:05:28,582 INFO SenderThread:900 [sender.py:transition_state():613] send defer: 8
+ 2024-05-30 07:05:28,582 DEBUG SenderThread:900 [sender.py:send_request():405] send_request: poll_exit
+ 2024-05-30 07:05:28,582 DEBUG HandlerThread:900 [handler.py:handle_request():158] handle_request: defer
+ 2024-05-30 07:05:28,582 INFO HandlerThread:900 [handler.py:handle_request_defer():184] handle defer: 8
+ 2024-05-30 07:05:28,582 INFO Thread-12 :900 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_070447-fi4sos5j/files/output.log
+ 2024-05-30 07:05:28,582 DEBUG SenderThread:900 [sender.py:send_request():405] send_request: defer
+ 2024-05-30 07:05:28,583 INFO SenderThread:900 [sender.py:send_request_defer():609] handle sender defer: 8
+ 2024-05-30 07:05:28,583 INFO SenderThread:900 [job_builder.py:build():432] Attempting to build job artifact
+ 2024-05-30 07:05:28,583 INFO SenderThread:900 [job_builder.py:_get_source_type():576] no source found
+ 2024-05-30 07:05:28,583 INFO SenderThread:900 [sender.py:transition_state():613] send defer: 9
+ 2024-05-30 07:05:28,583 DEBUG HandlerThread:900 [handler.py:handle_request():158] handle_request: defer
+ 2024-05-30 07:05:28,583 INFO HandlerThread:900 [handler.py:handle_request_defer():184] handle defer: 9
+ 2024-05-30 07:05:28,584 DEBUG SenderThread:900 [sender.py:send_request():405] send_request: defer
+ 2024-05-30 07:05:28,584 INFO SenderThread:900 [sender.py:send_request_defer():609] handle sender defer: 9
+ 2024-05-30 07:05:28,584 INFO SenderThread:900 [dir_watcher.py:finish():358] shutting down directory watcher
+ 2024-05-30 07:05:28,962 DEBUG HandlerThread:900 [handler.py:handle_request():158] handle_request: poll_exit
+ 2024-05-30 07:05:29,583 INFO SenderThread:900 [dir_watcher.py:finish():388] scan: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_070447-fi4sos5j/files
+ 2024-05-30 07:05:29,584 INFO SenderThread:900 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_070447-fi4sos5j/files/wandb-summary.json wandb-summary.json
+ 2024-05-30 07:05:29,584 INFO SenderThread:900 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_070447-fi4sos5j/files/output.log output.log
+ 2024-05-30 07:05:29,586 INFO SenderThread:900 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_070447-fi4sos5j/files/config.yaml config.yaml
+ 2024-05-30 07:05:29,586 INFO SenderThread:900 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_070447-fi4sos5j/files/requirements.txt requirements.txt
+ 2024-05-30 07:05:29,586 INFO SenderThread:900 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_070447-fi4sos5j/files/wandb-metadata.json wandb-metadata.json
+ 2024-05-30 07:05:29,587 INFO SenderThread:900 [sender.py:transition_state():613] send defer: 10
+ 2024-05-30 07:05:29,587 DEBUG SenderThread:900 [sender.py:send_request():405] send_request: poll_exit
+ 2024-05-30 07:05:29,587 DEBUG HandlerThread:900 [handler.py:handle_request():158] handle_request: defer
+ 2024-05-30 07:05:29,587 INFO HandlerThread:900 [handler.py:handle_request_defer():184] handle defer: 10
+ 2024-05-30 07:05:29,587 DEBUG SenderThread:900 [sender.py:send_request():405] send_request: defer
+ 2024-05-30 07:05:29,587 INFO SenderThread:900 [sender.py:send_request_defer():609] handle sender defer: 10
+ 2024-05-30 07:05:29,587 INFO SenderThread:900 [file_pusher.py:finish():169] shutting down file pusher
+ 2024-05-30 07:05:29,833 INFO wandb-upload_0:900 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_070447-fi4sos5j/files/wandb-summary.json
+ 2024-05-30 07:05:29,963 DEBUG HandlerThread:900 [handler.py:handle_request():158] handle_request: poll_exit
+ 2024-05-30 07:05:29,963 DEBUG SenderThread:900 [sender.py:send_request():405] send_request: poll_exit
+ 2024-05-30 07:05:30,198 INFO wandb-upload_1:900 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_070447-fi4sos5j/files/output.log
+ 2024-05-30 07:05:30,241 INFO wandb-upload_3:900 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_070447-fi4sos5j/files/requirements.txt
+ 2024-05-30 07:05:30,267 INFO wandb-upload_2:900 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_070447-fi4sos5j/files/config.yaml
+ 2024-05-30 07:05:30,467 INFO Thread-11 (_thread_body):900 [sender.py:transition_state():613] send defer: 11
+ 2024-05-30 07:05:30,467 DEBUG HandlerThread:900 [handler.py:handle_request():158] handle_request: defer
+ 2024-05-30 07:05:30,467 INFO HandlerThread:900 [handler.py:handle_request_defer():184] handle defer: 11
+ 2024-05-30 07:05:30,467 DEBUG SenderThread:900 [sender.py:send_request():405] send_request: defer
+ 2024-05-30 07:05:30,467 INFO SenderThread:900 [sender.py:send_request_defer():609] handle sender defer: 11
+ 2024-05-30 07:05:30,467 INFO SenderThread:900 [file_pusher.py:join():175] waiting for file pusher
+ 2024-05-30 07:05:30,468 INFO SenderThread:900 [sender.py:transition_state():613] send defer: 12
+ 2024-05-30 07:05:30,468 DEBUG HandlerThread:900 [handler.py:handle_request():158] handle_request: defer
+ 2024-05-30 07:05:30,468 INFO HandlerThread:900 [handler.py:handle_request_defer():184] handle defer: 12
+ 2024-05-30 07:05:30,468 DEBUG SenderThread:900 [sender.py:send_request():405] send_request: defer
+ 2024-05-30 07:05:30,468 INFO SenderThread:900 [sender.py:send_request_defer():609] handle sender defer: 12
+ 2024-05-30 07:05:30,468 INFO SenderThread:900 [file_stream.py:finish():601] file stream finish called
+ 2024-05-30 07:05:30,524 INFO SenderThread:900 [file_stream.py:finish():605] file stream finish is done
+ 2024-05-30 07:05:30,524 INFO SenderThread:900 [sender.py:transition_state():613] send defer: 13
+ 2024-05-30 07:05:30,524 DEBUG HandlerThread:900 [handler.py:handle_request():158] handle_request: defer
+ 2024-05-30 07:05:30,524 INFO HandlerThread:900 [handler.py:handle_request_defer():184] handle defer: 13
+ 2024-05-30 07:05:30,525 DEBUG SenderThread:900 [sender.py:send_request():405] send_request: defer
+ 2024-05-30 07:05:30,525 INFO SenderThread:900 [sender.py:send_request_defer():609] handle sender defer: 13
+ 2024-05-30 07:05:30,525 INFO SenderThread:900 [sender.py:transition_state():613] send defer: 14
+ 2024-05-30 07:05:30,525 DEBUG HandlerThread:900 [handler.py:handle_request():158] handle_request: defer
+ 2024-05-30 07:05:30,525 INFO HandlerThread:900 [handler.py:handle_request_defer():184] handle defer: 14
+ 2024-05-30 07:05:30,525 DEBUG SenderThread:900 [sender.py:send():378] send: final
+ 2024-05-30 07:05:30,525 DEBUG SenderThread:900 [sender.py:send():378] send: footer
+ 2024-05-30 07:05:30,525 DEBUG SenderThread:900 [sender.py:send_request():405] send_request: defer
+ 2024-05-30 07:05:30,525 INFO SenderThread:900 [sender.py:send_request_defer():609] handle sender defer: 14
+ 2024-05-30 07:05:30,525 DEBUG HandlerThread:900 [handler.py:handle_request():158] handle_request: poll_exit
+ 2024-05-30 07:05:30,526 DEBUG SenderThread:900 [sender.py:send_request():405] send_request: poll_exit
+ 2024-05-30 07:05:30,526 DEBUG HandlerThread:900 [handler.py:handle_request():158] handle_request: poll_exit
+ 2024-05-30 07:05:30,526 DEBUG HandlerThread:900 [handler.py:handle_request():158] handle_request: server_info
+ 2024-05-30 07:05:30,526 DEBUG HandlerThread:900 [handler.py:handle_request():158] handle_request: get_summary
+ 2024-05-30 07:05:30,526 DEBUG HandlerThread:900 [handler.py:handle_request():158] handle_request: sampled_history
+ 2024-05-30 07:05:30,526 DEBUG HandlerThread:900 [handler.py:handle_request():158] handle_request: internal_messages
+ 2024-05-30 07:05:30,527 DEBUG SenderThread:900 [sender.py:send_request():405] send_request: poll_exit
+ 2024-05-30 07:05:30,527 DEBUG SenderThread:900 [sender.py:send_request():405] send_request: server_info
+ 2024-05-30 07:05:30,577 INFO MainThread:900 [wandb_run.py:_footer_history_summary_info():3994] rendering history
+ 2024-05-30 07:05:30,577 INFO MainThread:900 [wandb_run.py:_footer_history_summary_info():4026] rendering summary
+ 2024-05-30 07:05:30,577 INFO MainThread:900 [wandb_run.py:_footer_sync_info():3953] logging synced files
+ 2024-05-30 07:05:30,578 DEBUG HandlerThread:900 [handler.py:handle_request():158] handle_request: shutdown
+ 2024-05-30 07:05:30,578 INFO HandlerThread:900 [handler.py:finish():882] shutting down handler
+ 2024-05-30 07:05:31,527 INFO WriterThread:900 [datastore.py:close():296] close: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_070447-fi4sos5j/run-fi4sos5j.wandb
+ 2024-05-30 07:05:31,577 INFO SenderThread:900 [sender.py:finish():1545] shutting down sender
+ 2024-05-30 07:05:31,577 INFO SenderThread:900 [file_pusher.py:finish():169] shutting down file pusher
+ 2024-05-30 07:05:31,577 INFO SenderThread:900 [file_pusher.py:join():175] waiting for file pusher
lm-evaluation-harness/wandb/run-20240530_070447-fi4sos5j/logs/debug.log ADDED
@@ -0,0 +1,29 @@
+ 2024-05-30 07:04:47,379 INFO MainThread:744 [wandb_setup.py:_flush():76] Current SDK version is 0.17.0
+ 2024-05-30 07:04:47,380 INFO MainThread:744 [wandb_setup.py:_flush():76] Configure stats pid to 744
+ 2024-05-30 07:04:47,380 INFO MainThread:744 [wandb_setup.py:_flush():76] Loading settings from /root/.config/wandb/settings
+ 2024-05-30 07:04:47,380 INFO MainThread:744 [wandb_setup.py:_flush():76] Loading settings from /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/settings
+ 2024-05-30 07:04:47,380 INFO MainThread:744 [wandb_setup.py:_flush():76] Loading settings from environment variables: {}
+ 2024-05-30 07:04:47,380 INFO MainThread:744 [wandb_setup.py:_flush():76] Applying setup settings: {'_disable_service': False}
+ 2024-05-30 07:04:47,380 WARNING MainThread:744 [wandb_setup.py:_flush():76] Could not find program at -m lm_eval.__main__
+ 2024-05-30 07:04:47,380 INFO MainThread:744 [wandb_setup.py:_flush():76] Inferring run settings from compute environment: {'program_relpath': None, 'program': '-m lm_eval.__main__'}
+ 2024-05-30 07:04:47,380 INFO MainThread:744 [wandb_setup.py:_flush():76] Applying login settings: {}
+ 2024-05-30 07:04:47,380 INFO MainThread:744 [wandb_init.py:_log_setup():520] Logging user logs to /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_070447-fi4sos5j/logs/debug.log
+ 2024-05-30 07:04:47,380 INFO MainThread:744 [wandb_init.py:_log_setup():521] Logging internal logs to /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_070447-fi4sos5j/logs/debug-internal.log
+ 2024-05-30 07:04:47,380 INFO MainThread:744 [wandb_init.py:init():560] calling init triggers
+ 2024-05-30 07:04:47,380 INFO MainThread:744 [wandb_init.py:init():567] wandb.init called with sweep_config: {}
+ config: {}
+ 2024-05-30 07:04:47,380 INFO MainThread:744 [wandb_init.py:init():610] starting backend
+ 2024-05-30 07:04:47,380 INFO MainThread:744 [wandb_init.py:init():614] setting up manager
+ 2024-05-30 07:04:47,384 INFO MainThread:744 [backend.py:_multiprocessing_setup():105] multiprocessing start_methods=fork,spawn,forkserver, using: spawn
+ 2024-05-30 07:04:47,385 INFO MainThread:744 [wandb_init.py:init():622] backend started and connected
+ 2024-05-30 07:04:47,389 INFO MainThread:744 [wandb_init.py:init():711] updated telemetry
+ 2024-05-30 07:04:47,398 INFO MainThread:744 [wandb_init.py:init():744] communicating run to backend with 90.0 second timeout
+ 2024-05-30 07:04:47,695 INFO MainThread:744 [wandb_run.py:_on_init():2396] communicating current version
+ 2024-05-30 07:04:47,809 INFO MainThread:744 [wandb_run.py:_on_init():2405] got version response
+ 2024-05-30 07:04:47,809 INFO MainThread:744 [wandb_init.py:init():795] starting run threads in backend
+ 2024-05-30 07:04:48,086 INFO MainThread:744 [wandb_run.py:_console_start():2374] atexit reg
+ 2024-05-30 07:04:48,086 INFO MainThread:744 [wandb_run.py:_redirect():2229] redirect: wrap_raw
+ 2024-05-30 07:04:48,086 INFO MainThread:744 [wandb_run.py:_redirect():2294] Wrapping output streams.
+ 2024-05-30 07:04:48,086 INFO MainThread:744 [wandb_run.py:_redirect():2319] Redirects installed.
+ 2024-05-30 07:04:48,087 INFO MainThread:744 [wandb_init.py:init():838] run started, returning control to user process
+ 2024-05-30 07:05:31,579 WARNING MsgRouterThr:744 [router.py:message_loop():77] message_loop has been closed
lm-evaluation-harness/wandb/run-20240530_070447-fi4sos5j/run-fi4sos5j.wandb ADDED
Binary file (12 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/gpt_bigcode/__init__.py ADDED
@@ -0,0 +1,65 @@
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ from typing import TYPE_CHECKING
+
+ from ...utils import (
+     OptionalDependencyNotAvailable,
+     _LazyModule,
+     is_torch_available,
+ )
+
+
+ _import_structure = {
+     "configuration_gpt_bigcode": ["GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTBigCodeConfig"],
+ }
+
+ try:
+     if not is_torch_available():
+         raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+     pass
+ else:
+     _import_structure["modeling_gpt_bigcode"] = [
+         "GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST",
+         "GPTBigCodeForSequenceClassification",
+         "GPTBigCodeForTokenClassification",
+         "GPTBigCodeForCausalLM",
+         "GPTBigCodeModel",
+         "GPTBigCodePreTrainedModel",
+     ]
+
+ if TYPE_CHECKING:
+     from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
+
+     try:
+         if not is_torch_available():
+             raise OptionalDependencyNotAvailable()
+     except OptionalDependencyNotAvailable:
+         pass
+     else:
+         from .modeling_gpt_bigcode import (
+             GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
+             GPTBigCodeForCausalLM,
+             GPTBigCodeForSequenceClassification,
+             GPTBigCodeForTokenClassification,
+             GPTBigCodeModel,
+             GPTBigCodePreTrainedModel,
+         )
+
+
+ else:
+     import sys
+
+     sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
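The `__init__.py` above uses the standard `_LazyModule` pattern: only the names declared in `_import_structure` are registered eagerly, and the torch-backed `modeling_gpt_bigcode` submodule is imported the first time one of its classes is actually accessed. A minimal sketch of what this buys at import time (assuming a working `transformers` install; the module path and class names are taken from the file above, and whether a submodule is already in `sys.modules` can vary with what else has been imported):

```python
import importlib
import sys

# Importing the package module itself is cheap: _LazyModule only records the
# attribute names, it does not import modeling_gpt_bigcode (and thus torch) yet.
gpt_bigcode = importlib.import_module("transformers.models.gpt_bigcode")
print("modeling loaded?",
      "transformers.models.gpt_bigcode.modeling_gpt_bigcode" in sys.modules)

# Accessing the config class pulls in only configuration_gpt_bigcode.
config_cls = gpt_bigcode.GPTBigCodeConfig

# First access to a model class triggers the real import of the modeling file.
model_cls = gpt_bigcode.GPTBigCodeForCausalLM
print("modeling loaded?",
      "transformers.models.gpt_bigcode.modeling_gpt_bigcode" in sys.modules)
```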
venv/lib/python3.10/site-packages/transformers/models/gpt_bigcode/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.07 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/gpt_bigcode/__pycache__/configuration_gpt_bigcode.cpython-310.pyc ADDED
Binary file (5.52 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/gpt_bigcode/__pycache__/modeling_gpt_bigcode.cpython-310.pyc ADDED
Binary file (38.1 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/gpt_bigcode/configuration_gpt_bigcode.py ADDED
@@ -0,0 +1,144 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 The BigCode team and HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ GPTBigCode configuration"""
16
+
17
+ from ...configuration_utils import PretrainedConfig
18
+ from ...utils import logging
19
+
20
+
21
+ logger = logging.get_logger(__name__)
22
+
23
+
24
+ from ..deprecated._archive_maps import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
25
+
26
+
27
+ class GPTBigCodeConfig(PretrainedConfig):
28
+ """
29
+ This is the configuration class to store the configuration of a [`GPTBigCodeModel`]. It is used to instantiate a
30
+ GPTBigCode model according to the specified arguments, defining the model architecture. Instantiating a
31
+ configuration with the defaults will yield a similar configuration to that of the GPTBigCode
32
+ [gpt_bigcode](https://huggingface.co/gpt_bigcode) architecture.
33
+
34
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
35
+ documentation from [`PretrainedConfig`] for more information.
36
+
37
+
38
+ Args:
39
+ vocab_size (`int`, *optional*, defaults to 50257):
40
+ Vocabulary size of the GPT-2 model. Defines the number of different tokens that can be represented by the
41
+ `inputs_ids` passed when calling [`GPTBigCodeModel`].
42
+ n_positions (`int`, *optional*, defaults to 1024):
43
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
44
+ just in case (e.g., 512 or 1024 or 2048).
45
+ n_embd (`int`, *optional*, defaults to 768):
46
+ Dimensionality of the embeddings and hidden states.
47
+ n_layer (`int`, *optional*, defaults to 12):
48
+ Number of hidden layers in the Transformer encoder.
49
+ n_head (`int`, *optional*, defaults to 12):
50
+ Number of attention heads for each attention layer in the Transformer encoder.
51
+ n_inner (`int`, *optional*, defaults to None):
52
+ Dimensionality of the inner feed-forward layers. `None` will set it to 4 times n_embd
53
+ activation_function (`str`, *optional*, defaults to `"gelu_pytorch_tanh"`):
54
+ Activation function, to be selected in the list `["relu", "silu", "gelu", "tanh", "gelu_new",
55
+ "gelu_pytorch_tanh"]`.
56
+ resid_pdrop (`float`, *optional*, defaults to 0.1):
57
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
58
+ embd_pdrop (`float`, *optional*, defaults to 0.1):
59
+ The dropout ratio for the embeddings.
60
+ attn_pdrop (`float`, *optional*, defaults to 0.1):
61
+ The dropout ratio for the attention.
62
+ layer_norm_epsilon (`float`, *optional*, defaults to 1e-5):
63
+ The epsilon to use in the layer normalization layers.
64
+ initializer_range (`float`, *optional*, defaults to 0.02):
65
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
66
+ scale_attn_weights (`bool`, *optional*, defaults to `True`):
67
+ Scale attention weights by dividing by sqrt(hidden_size)..
68
+ use_cache (`bool`, *optional*, defaults to `True`):
69
+ Whether or not the model should return the last key/values attentions (not used by all models).
70
+ attention_softmax_in_fp32 (`bool`, *optional*, defaults to `True`):
71
+ Whether to call the fused softmax in float32.
72
+ scale_attention_softmax_in_fp32 (`bool`, *optional*, defaults to `True`):
73
+ Whether to scale the attention softmax in float32.
74
+ attention_type (`bool`, *optional*, defaults to `True`):
75
+ Whether to use Multi-Query Attion (`True`) or Multi-Head Attention (`False`).
76
+ Example:
77
+
78
+ ```python
79
+ >>> from transformers import GPTBigCodeConfig, GPTBigCodeModel
80
+
81
+ >>> # Initializing a GPTBigCode configuration
82
+ >>> configuration = GPTBigCodeConfig()
83
+
84
+ >>> # Initializing a model (with random weights) from the configuration
85
+ >>> model = GPTBigCodeModel(configuration)
86
+
87
+ >>> # Accessing the model configuration
88
+ >>> configuration = model.config
89
+ ```"""
90
+
91
+ model_type = "gpt_bigcode"
92
+ keys_to_ignore_at_inference = ["past_key_values"]
93
+ attribute_map = {
94
+ "hidden_size": "n_embd",
95
+ "max_position_embeddings": "n_positions",
96
+ "num_attention_heads": "n_head",
97
+ "num_hidden_layers": "n_layer",
98
+ }
99
+
100
+ def __init__(
101
+ self,
102
+ vocab_size=50257,
103
+ n_positions=1024,
104
+ n_embd=768,
105
+ n_layer=12,
106
+ n_head=12,
107
+ n_inner=None,
108
+ activation_function="gelu_pytorch_tanh",
109
+ resid_pdrop=0.1,
110
+ embd_pdrop=0.1,
111
+ attn_pdrop=0.1,
112
+ layer_norm_epsilon=1e-5,
113
+ initializer_range=0.02,
114
+ scale_attn_weights=True,
115
+ use_cache=True,
116
+ bos_token_id=50256,
117
+ eos_token_id=50256,
118
+ attention_softmax_in_fp32=True,
119
+ scale_attention_softmax_in_fp32=True,
120
+ multi_query=True,
121
+ **kwargs,
122
+ ):
123
+ self.vocab_size = vocab_size
124
+ self.n_positions = n_positions
125
+ self.n_embd = n_embd
126
+ self.n_layer = n_layer
127
+ self.n_head = n_head
128
+ self.n_inner = n_inner
129
+ self.activation_function = activation_function
130
+ self.resid_pdrop = resid_pdrop
131
+ self.embd_pdrop = embd_pdrop
132
+ self.attn_pdrop = attn_pdrop
133
+ self.layer_norm_epsilon = layer_norm_epsilon
134
+ self.initializer_range = initializer_range
135
+ self.scale_attn_weights = scale_attn_weights
136
+ self.use_cache = use_cache
137
+ self.attention_softmax_in_fp32 = attention_softmax_in_fp32
138
+ self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
139
+ self.multi_query = multi_query
140
+
141
+ self.bos_token_id = bos_token_id
142
+ self.eos_token_id = eos_token_id
143
+
144
+ super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
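As an editor's illustration (not part of the commit), a minimal sketch of using the configuration class above; the sizes are arbitrary, and the assertions only exercise the `attribute_map` aliases and the `multi_query` flag defined in this file.

from transformers import GPTBigCodeConfig

config = GPTBigCodeConfig(n_embd=2048, n_layer=24, n_head=16, multi_query=True)
assert config.hidden_size == config.n_embd == 2048        # alias via attribute_map
assert config.num_hidden_layers == config.n_layer == 24   # alias via attribute_map
assert config.n_inner is None                             # the MLP falls back to 4 * n_embd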
venv/lib/python3.10/site-packages/transformers/models/gpt_bigcode/modeling_gpt_bigcode.py ADDED
@@ -0,0 +1,1504 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 The Bigcode team and HuggingFace Inc. team.
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ """PyTorch GPTBigCode model."""
15
+ import math
16
+ from typing import List, Optional, Tuple, Union
17
+
18
+ import torch
19
+ import torch.nn.functional as F
20
+ import torch.utils.checkpoint
21
+ from torch import nn
22
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
23
+
24
+ from ...activations import ACT2FN
25
+ from ...modeling_attn_mask_utils import AttentionMaskConverter
26
+ from ...modeling_outputs import (
27
+ BaseModelOutputWithPastAndCrossAttentions,
28
+ CausalLMOutputWithCrossAttentions,
29
+ SequenceClassifierOutputWithPast,
30
+ TokenClassifierOutput,
31
+ )
32
+ from ...modeling_utils import PreTrainedModel
33
+ from ...pytorch_utils import is_torch_greater_or_equal_than_2_2
34
+ from ...utils import (
35
+ add_code_sample_docstrings,
36
+ add_start_docstrings,
37
+ add_start_docstrings_to_model_forward,
38
+ is_flash_attn_2_available,
39
+ is_flash_attn_greater_or_equal_2_10,
40
+ logging,
41
+ )
42
+ from .configuration_gpt_bigcode import GPTBigCodeConfig
43
+
44
+
45
+ if is_flash_attn_2_available():
46
+ from flash_attn import flash_attn_func, flash_attn_varlen_func
47
+ from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input # noqa
48
+
49
+
50
+ logger = logging.get_logger(__name__)
51
+
52
+ _CHECKPOINT_FOR_DOC = "bigcode/gpt_bigcode-santacoder"
53
+ _CONFIG_FOR_DOC = "GPTBigCodeConfig"
54
+
55
+
56
+ from ..deprecated._archive_maps import GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
57
+
58
+
59
+ # Fused kernels
60
+ # Use separate functions for each case because conditionals prevent kernel fusion.
61
+ # TODO: Could have better fused kernels depending on scaling, dropout and head mask.
62
+ # Is it doable without writing 32 functions?
63
+ @torch.jit.script
64
+ def upcast_masked_softmax(
65
+ x: torch.Tensor, mask: torch.Tensor, mask_value: torch.Tensor, scale: float, softmax_dtype: torch.dtype
66
+ ):
67
+ input_dtype = x.dtype
68
+ x = x.to(softmax_dtype) * scale
69
+ x = torch.where(mask, x, mask_value)
70
+ x = torch.nn.functional.softmax(x, dim=-1).to(input_dtype)
71
+ return x
72
+
73
+
74
+ @torch.jit.script
75
+ def upcast_softmax(x: torch.Tensor, scale: float, softmax_dtype: torch.dtype):
76
+ input_dtype = x.dtype
77
+ x = x.to(softmax_dtype) * scale
78
+ x = torch.nn.functional.softmax(x, dim=-1).to(input_dtype)
79
+ return x
80
+
81
+
82
+ @torch.jit.script
83
+ def masked_softmax(x: torch.Tensor, mask: torch.Tensor, mask_value: torch.Tensor):
84
+ x = torch.where(mask, x, mask_value)
85
+ x = torch.nn.functional.softmax(x, dim=-1)
86
+ return x
87
+
88
+
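Editorial note: the three jit-scripted helpers above are fused variants of mask-then-softmax. A minimal sketch of the equivalence (assumed usage with a unit scale; not part of the original file, and it relies on `upcast_masked_softmax` from this module being in scope):

import torch

x = torch.randn(1, 4, dtype=torch.float16)
mask = torch.tensor([[True, True, False, True]])
mask_value = torch.full([], torch.finfo(torch.float32).min)

fused = upcast_masked_softmax(x, mask, mask_value, 1.0, torch.float32)
# Unfused reference: upcast, mask, softmax in float32, then cast back to the input dtype.
reference = torch.where(mask, x.to(torch.float32), mask_value).softmax(dim=-1).to(x.dtype)
torch.testing.assert_close(fused, reference)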
89
+ # Copied from transformers.models.llama.modeling_llama._get_unpad_data
90
+ def _get_unpad_data(attention_mask):
91
+ seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
92
+ indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
93
+ max_seqlen_in_batch = seqlens_in_batch.max().item()
94
+ cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
95
+ return (
96
+ indices,
97
+ cu_seqlens,
98
+ max_seqlen_in_batch,
99
+ )
100
+
101
+
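Editorial note: a hand-worked example (not part of the original file) of what `_get_unpad_data` returns for a right-padded batch of two sequences:

import torch

attention_mask = torch.tensor([[1, 1, 0],
                               [1, 1, 1]])
indices, cu_seqlens, max_seqlen = _get_unpad_data(attention_mask)
# indices    -> tensor([0, 1, 3, 4, 5])                positions of real tokens in the flattened batch
# cu_seqlens -> tensor([0, 2, 5], dtype=torch.int32)   cumulative sequence lengths for the varlen kernels
# max_seqlen -> 3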
102
+ class GPTBigCodeAttention(nn.Module):
103
+ def __init__(self, config, is_cross_attention=False, layer_idx=None):
104
+ super().__init__()
105
+ self.config = config
106
+
107
+ self.mask_value = None
108
+ self.multi_query = config.multi_query
109
+ self.embed_dim = config.hidden_size
110
+ self.num_heads = config.num_attention_heads
111
+ self.head_dim = self.embed_dim // self.num_heads
112
+ self.kv_heads = 1 if self.multi_query else self.num_heads
113
+ self.kv_dim = self.kv_heads * self.head_dim
114
+ self.split_size = self.embed_dim
115
+ self.is_causal = True
116
+
117
+ if self.head_dim * self.num_heads != self.embed_dim:
118
+ raise ValueError(
119
+ f"`embed_dim` must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
120
+ f" {self.num_heads})."
121
+ )
122
+
123
+ self.scale_attn_weights = config.scale_attn_weights
124
+ self.is_cross_attention = is_cross_attention
125
+
126
+ self.layer_idx = layer_idx
127
+ self.attention_softmax_in_fp32 = config.attention_softmax_in_fp32
128
+ self.scale_attention_softmax_in_fp32 = (
129
+ config.scale_attention_softmax_in_fp32 and config.attention_softmax_in_fp32
130
+ )
131
+ self.attn_pdrop = config.attn_pdrop
132
+
133
+ if self.is_cross_attention:
134
+ if self.multi_query:
135
+ raise NotImplementedError("Multi-Query Attention not supported for cross_attention")
136
+
137
+ self.c_attn = nn.Linear(self.embed_dim, 2 * self.embed_dim)
138
+ self.q_attn = nn.Linear(self.embed_dim, self.embed_dim)
139
+ else:
140
+ self.c_attn = nn.Linear(self.embed_dim, self.embed_dim + 2 * self.kv_dim)
141
+
142
+ self.c_proj = nn.Linear(self.embed_dim, self.embed_dim)
143
+
144
+ self.attn_dropout = nn.Dropout(config.attn_pdrop)
145
+ self.resid_dropout = nn.Dropout(config.resid_pdrop)
146
+
147
+ def _get_mask_value(self, device, dtype):
148
+ # torch.where expects a tensor. We use a cache to avoid recreating it every time.
149
+ if self.mask_value is None or self.mask_value.dtype != dtype or self.mask_value.device != device:
150
+ self.mask_value = torch.full([], torch.finfo(dtype).min, dtype=dtype, device=device)
151
+ return self.mask_value
152
+
153
+ def _attn(self, query, key, value, attention_mask=None, head_mask=None):
154
+ dtype = query.dtype
155
+ softmax_dtype = torch.float32 if self.attention_softmax_in_fp32 else dtype
156
+ upcast = dtype != softmax_dtype
157
+
158
+ unscale = self.layer_idx + 1 if self.scale_attention_softmax_in_fp32 and upcast else 1
159
+ scale_factor = unscale**-1
160
+ if self.scale_attn_weights:
161
+ scale_factor /= self.head_dim**0.5
162
+
163
+ # MQA models: (batch_size, query_length, num_heads * head_dim)
164
+ # MHA models: (batch_size, num_heads, query_length, head_dim)
165
+ query_shape = query.shape
166
+ batch_size = query_shape[0]
167
+ key_length = key.size(-1)
168
+ if self.multi_query:
169
+ # (batch_size, query_length, num_heads, head_dim) x (batch_size, head_dim, key_length)
170
+ # -> (batch_size, query_length, num_heads, key_length)
171
+ query_length = query_shape[1]
172
+ attn_shape = (batch_size, query_length, self.num_heads, key_length)
173
+ attn_view = (batch_size, query_length * self.num_heads, key_length)
174
+ # No copy needed for MQA 2, or when layer_past is provided.
175
+ query = query.reshape(batch_size, query_length * self.num_heads, self.head_dim)
176
+ else:
177
+ # (batch_size, num_heads, query_length, head_dim) x (batch_size, num_heads, head_dim, key_length)
178
+ # -> (batch_size, num_heads, query_length, key_length)
179
+ query_length = query_shape[2]
180
+ attn_shape = (batch_size, self.num_heads, query_length, key_length)
181
+ attn_view = (batch_size * self.num_heads, query_length, key_length)
182
+ # Always copies
183
+ query = query.reshape(batch_size * self.num_heads, query_length, self.head_dim)
184
+ # No copy when layer_past is provided.
185
+ key = key.reshape(batch_size * self.num_heads, self.head_dim, key_length)
186
+
187
+ attn_weights = torch.empty(attn_view, device=query.device, dtype=query.dtype)
188
+ if query.device.type == "cpu":
189
+ # This is needed because of a bug in pytorch https://github.com/pytorch/pytorch/issues/80588.
190
+ # The bug was fixed in https://github.com/pytorch/pytorch/pull/96086,
191
+ # but the fix has not been released as of pytorch version 2.0.0.
192
+ attn_weights = torch.zeros_like(attn_weights)
193
+ beta = 1
194
+ else:
195
+ beta = 0
196
+ attn_weights = torch.baddbmm(attn_weights, query, key, beta=beta, alpha=scale_factor).view(attn_shape)
197
+
198
+ if upcast:
199
+ # Use a fused kernel to prevent a large overhead from casting and scaling.
200
+ # Sub-optimal when the key length is not a multiple of 8.
201
+ if attention_mask is None:
202
+ attn_weights = upcast_softmax(attn_weights, unscale, softmax_dtype)
203
+ else:
204
+ mask_value = self._get_mask_value(attn_weights.device, softmax_dtype)
205
+ attn_weights = upcast_masked_softmax(attn_weights, attention_mask, mask_value, unscale, softmax_dtype)
206
+ else:
207
+ if attention_mask is not None:
208
+ mask_value = self._get_mask_value(attn_weights.device, softmax_dtype)
209
+
210
+ # The fused kernel is very slow when the key length is not a multiple of 8, so we skip fusion.
211
+ attn_weights = torch.where(attention_mask, attn_weights, mask_value)
212
+
213
+ attn_weights = torch.nn.functional.softmax(attn_weights, dim=-1)
214
+
215
+ attn_weights = self.attn_dropout(attn_weights)
216
+
217
+ # Mask heads if we want to
218
+ if head_mask is not None:
219
+ if self.multi_query:
220
+ head_mask = head_mask.transpose(1, 2)
221
+ attn_weights = attn_weights * head_mask
222
+
223
+ if self.multi_query:
224
+ attn_output = torch.bmm(attn_weights.view(attn_view), value).view(query_shape)
225
+ else:
226
+ attn_output = torch.matmul(attn_weights, value)
227
+
228
+ return attn_output, attn_weights
229
+
230
+ def forward(
231
+ self,
232
+ hidden_states: torch.Tensor,
233
+ layer_past: Optional[torch.Tensor] = None,
234
+ attention_mask: Optional[torch.Tensor] = None,
235
+ head_mask: Optional[torch.Tensor] = None,
236
+ encoder_hidden_states: Optional[torch.Tensor] = None,
237
+ encoder_attention_mask: Optional[torch.Tensor] = None,
238
+ use_cache: Optional[bool] = False,
239
+ output_attentions: Optional[bool] = False,
240
+ ) -> Union[
241
+ Tuple[torch.Tensor, Optional[torch.Tensor]],
242
+ Tuple[torch.Tensor, Optional[torch.Tensor], Tuple[torch.Tensor, ...]],
243
+ ]:
244
+ if encoder_hidden_states is not None:
245
+ if not hasattr(self, "q_attn") or not self.is_cross_attention:
246
+ raise ValueError(
247
+ "If class is used as cross attention, the weights `q_attn` have to be defined. "
248
+ "Please make sure to instantiate class with `GPTBigCodeAttention(..., is_cross_attention=True)`."
249
+ )
250
+
251
+ query = self.q_attn(hidden_states)
252
+ key_value = self.c_attn(encoder_hidden_states)
253
+ attention_mask = encoder_attention_mask
254
+ elif self.multi_query:
255
+ query, key_value = self.c_attn(hidden_states).split((self.embed_dim, 2 * self.kv_dim), dim=2)
256
+ else:
257
+ # Note: We split as (self.num_heads, 3, self.head_dim) instead of (3, self.num_heads, self.head_dim),
258
+ # i.e., the memory layout is not the same as GPT2.
259
+ # This makes the concatenation with past_key_value more efficient.
260
+ query, key_value = (
261
+ self.c_attn(hidden_states)
262
+ .view(*hidden_states.shape[:2], self.num_heads, 3 * self.head_dim)
263
+ .transpose(1, 2)
264
+ .split((self.head_dim, 2 * self.head_dim), dim=3)
265
+ )
266
+
267
+ if layer_past is not None:
268
+ key_value = torch.cat((layer_past, key_value), dim=-2)
269
+ present = key_value if use_cache else None
270
+
271
+ key, value = key_value.split((self.head_dim, self.head_dim), dim=-1)
272
+
273
+ attn_output, attn_weights = self._attn(query, key.transpose(-1, -2), value, attention_mask, head_mask)
274
+
275
+ if not self.multi_query:
276
+ attn_output = attn_output.transpose(1, 2).reshape(hidden_states.shape)
277
+ attn_output = self.c_proj(attn_output)
278
+ attn_output = self.resid_dropout(attn_output)
279
+
280
+ outputs = (attn_output, present)
281
+ if output_attentions:
282
+ if self.multi_query:
283
+ # Transpose to return weights in the usual format (batch_size, num_heads, query_length, key_length)
284
+ attn_weights = attn_weights.transpose(1, 2)
285
+ outputs += (attn_weights,)
286
+
287
+ return outputs # a, present, (attentions)
288
+
289
+
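Editorial note: a rough shape walk-through (hypothetical sizes, not part of the original file) of the `c_attn` split performed in the forward above, assuming hidden_size 2048 and 16 heads (head_dim = 128):

# multi_query=True: one key/value pair shared by all heads
#   hidden_states          (batch, seq, 2048)
#   c_attn(hidden_states)  (batch, seq, 2048 + 2 * 128) = (batch, seq, 2304)
#   query                  (batch, seq, 2048)   # num_heads * head_dim
#   key_value              (batch, seq, 256)    # 2 * head_dim, shared across heads
# multi_query=False: reshape to (batch, num_heads, seq, 3 * head_dim), then split into
#   (head_dim, 2 * head_dim) per head, so every head keeps its own key and value.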
290
+ class GPTBigCodeFlashAttention2(GPTBigCodeAttention):
291
+ """
292
+ GPTBigCode flash attention module. This module inherits from `GPTBigCodeAttention` as the weights of the module
293
+ stays untouched. The only required change would be on the forward pass where it needs to correctly call the public
294
+ API of flash attention and deal with padding tokens in case the input contains any of them.
295
+ """
296
+
297
+ # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2.__init__
298
+ def __init__(self, *args, **kwargs):
299
+ super().__init__(*args, **kwargs)
300
+
301
+ # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1.
302
+ # flash_attn<2.1 generates top-left aligned causal mask, while what is needed here is bottom-right alignement, that was made default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0.
303
+ # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left).
304
+ self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()
305
+
306
+ def forward(
307
+ self,
308
+ hidden_states: torch.Tensor,
309
+ layer_past: Optional[torch.Tensor] = None,
310
+ attention_mask: Optional[torch.Tensor] = None,
311
+ head_mask: Optional[torch.Tensor] = None,
312
+ encoder_hidden_states: Optional[torch.Tensor] = None,
313
+ encoder_attention_mask: Optional[torch.Tensor] = None,
314
+ use_cache: Optional[bool] = False,
315
+ output_attentions: Optional[bool] = False,
316
+ ) -> Union[
317
+ Tuple[torch.Tensor, Optional[torch.Tensor]],
318
+ Tuple[torch.Tensor, Optional[torch.Tensor], Tuple[torch.Tensor, ...]],
319
+ ]:
320
+ if encoder_hidden_states is not None:
321
+ if not hasattr(self, "q_attn") or not self.is_cross_attention:
322
+ raise ValueError(
323
+ "If class is used as cross attention, the weights `q_attn` have to be defined. "
324
+ "Please make sure to instantiate class with `GPTBigCodeAttention(..., is_cross_attention=True)`."
325
+ )
326
+
327
+ query = self.q_attn(hidden_states)
328
+ key_value = self.c_attn(encoder_hidden_states)
329
+ attention_mask = encoder_attention_mask
330
+ elif self.multi_query:
331
+ query, key_value = self.c_attn(hidden_states).split((self.embed_dim, 2 * self.kv_dim), dim=2)
332
+ else:
333
+ # Note: We split as (self.num_heads, 3, self.head_dim) instead of (3, self.num_heads, self.head_dim),
334
+ # i.e., the memory layout is not the same as GPT2.
335
+ # This makes the concatenation with past_key_value more efficient.
336
+ query, key_value = (
337
+ self.c_attn(hidden_states)
338
+ .view(*hidden_states.shape[:2], self.num_heads, 3 * self.head_dim)
339
+ .transpose(1, 2)
340
+ .split((self.head_dim, 2 * self.head_dim), dim=3)
341
+ )
342
+
343
+ if layer_past is not None:
344
+ key_value = torch.cat((layer_past, key_value), dim=-2)
345
+ present = key_value if use_cache else None
346
+
347
+ key, value = key_value.split((self.head_dim, self.head_dim), dim=-1)
348
+
349
+ # Flash attention requires the input to have the shape
350
+ # batch_size x seq_length x head_dim x hidden_dim
351
+ if self.multi_query:
352
+ batch_size, query_length, _ = query.shape
353
+ query = query.reshape(batch_size, query_length, self.num_heads, self.head_dim)
354
+ key = key.unsqueeze(2)
355
+ value = value.unsqueeze(2)
356
+ else:
357
+ query_length = query.shape[2]
358
+ batch_size, _, tgt, _ = key.shape
359
+ query = query.transpose(1, 2).reshape(batch_size, query_length, self.num_heads, self.head_dim)
360
+ key = key.transpose(1, 2).reshape(batch_size, tgt, self.num_heads, self.head_dim)
361
+ value = value.transpose(1, 2).reshape(batch_size, tgt, self.num_heads, self.head_dim)
362
+
363
+ attn_dropout = self.attn_pdrop if self.training else 0.0
364
+
365
+ # In PEFT, usually we cast the layer norms in float32 for training stability reasons
366
+ # therefore the input hidden states gets silently casted in float32. Hence, we need
367
+ # cast them back in float16 just to be sure everything works as expected.
368
+ input_dtype = query.dtype
369
+ if input_dtype == torch.float32:
370
+ if torch.is_autocast_enabled():
371
+ target_dtype = torch.get_autocast_gpu_dtype()
372
+ # Handle the case where the model is quantized
373
+ elif hasattr(self.config, "_pre_quantization_dtype"):
374
+ target_dtype = self.config._pre_quantization_dtype
375
+ else:
376
+ target_dtype = self.c_attn.weight.dtype
377
+
378
+ logger.warning_once(
379
+ f"The input hidden states seems to be silently casted in float32, this might be related to"
380
+ f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in"
381
+ f" {target_dtype}."
382
+ )
383
+ query = query.to(target_dtype)
384
+ key = key.to(target_dtype)
385
+ value = value.to(target_dtype)
386
+
387
+ attn_output = self._flash_attention_forward(
388
+ query, key, value, attention_mask, query_length, dropout=attn_dropout
389
+ )
390
+
391
+ attn_weights_reshaped = attn_output.reshape(batch_size, query_length, self.num_heads * self.head_dim)
392
+ attn_output = self.c_proj(attn_weights_reshaped)
393
+ attn_output = self.resid_dropout(attn_output)
394
+
395
+ outputs = (attn_output, present)
396
+
397
+ if output_attentions:
398
+ if self.multi_query:
399
+ # Transpose to return weights in the usual format (batch_size, num_heads, query_length, key_length)
400
+ attn_weights_reshaped = attn_weights_reshaped.transpose(1, 2)
401
+ else:
402
+ attn_weights_reshaped = None
403
+
404
+ outputs += (attn_weights_reshaped,)
405
+
406
+ return outputs # a, present, (attentions)
407
+
408
+ # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2._flash_attention_forward
409
+ def _flash_attention_forward(
410
+ self, query_states, key_states, value_states, attention_mask, query_length, dropout=0.0, softmax_scale=None
411
+ ):
412
+ """
413
+ Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token
414
+ first unpad the input, then compute the attention scores and pad the final attention scores.
415
+
416
+ Args:
417
+ query_states (`torch.Tensor`):
418
+ Input query states to be passed to Flash Attention API
419
+ key_states (`torch.Tensor`):
420
+ Input key states to be passed to Flash Attention API
421
+ value_states (`torch.Tensor`):
422
+ Input value states to be passed to Flash Attention API
423
+ attention_mask (`torch.Tensor`):
424
+ The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the
425
+ position of padding tokens and 1 for the position of non-padding tokens.
426
+ dropout (`float`):
427
+ Attention dropout
428
+ softmax_scale (`float`, *optional*):
429
+ The scaling of QK^T before applying softmax. Defaults to 1 / sqrt(head_dim).
430
+ """
431
+ if not self._flash_attn_uses_top_left_mask:
432
+ causal = self.is_causal
433
+ else:
434
+ # TODO: Remove the `query_length != 1` check once Flash Attention for RoCm is bumped to 2.1. For details, please see the comment in LlamaFlashAttention2 __init__.
435
+ causal = self.is_causal and query_length != 1
436
+
437
+ # Contains at least one padding token in the sequence
438
+ if attention_mask is not None:
439
+ batch_size = query_states.shape[0]
440
+ query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input(
441
+ query_states, key_states, value_states, attention_mask, query_length
442
+ )
443
+
444
+ cu_seqlens_q, cu_seqlens_k = cu_seq_lens
445
+ max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens
446
+
447
+ attn_output_unpad = flash_attn_varlen_func(
448
+ query_states,
449
+ key_states,
450
+ value_states,
451
+ cu_seqlens_q=cu_seqlens_q,
452
+ cu_seqlens_k=cu_seqlens_k,
453
+ max_seqlen_q=max_seqlen_in_batch_q,
454
+ max_seqlen_k=max_seqlen_in_batch_k,
455
+ dropout_p=dropout,
456
+ softmax_scale=softmax_scale,
457
+ causal=causal,
458
+ )
459
+
460
+ attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length)
461
+ else:
462
+ attn_output = flash_attn_func(
463
+ query_states, key_states, value_states, dropout, softmax_scale=softmax_scale, causal=causal
464
+ )
465
+
466
+ return attn_output
467
+
468
+ # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2._upad_input
469
+ def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length):
470
+ indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask)
471
+ batch_size, kv_seq_len, num_key_value_heads, head_dim = key_layer.shape
472
+
473
+ key_layer = index_first_axis(
474
+ key_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
475
+ )
476
+ value_layer = index_first_axis(
477
+ value_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
478
+ )
479
+ if query_length == kv_seq_len:
480
+ query_layer = index_first_axis(
481
+ query_layer.reshape(batch_size * kv_seq_len, self.num_heads, head_dim), indices_k
482
+ )
483
+ cu_seqlens_q = cu_seqlens_k
484
+ max_seqlen_in_batch_q = max_seqlen_in_batch_k
485
+ indices_q = indices_k
486
+ elif query_length == 1:
487
+ max_seqlen_in_batch_q = 1
488
+ cu_seqlens_q = torch.arange(
489
+ batch_size + 1, dtype=torch.int32, device=query_layer.device
490
+ ) # There is a memcpy here, that is very bad.
491
+ indices_q = cu_seqlens_q[:-1]
492
+ query_layer = query_layer.squeeze(1)
493
+ else:
494
+ # The -q_len: slice assumes left padding.
495
+ attention_mask = attention_mask[:, -query_length:]
496
+ query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask)
497
+
498
+ return (
499
+ query_layer,
500
+ key_layer,
501
+ value_layer,
502
+ indices_q,
503
+ (cu_seqlens_q, cu_seqlens_k),
504
+ (max_seqlen_in_batch_q, max_seqlen_in_batch_k),
505
+ )
506
+
507
+
508
+ class GPTBigCodeSdpaAttention(GPTBigCodeAttention):
509
+ def _attn(self, query, key, value, attention_mask=None, head_mask=None):
510
+ if head_mask is not None:
511
+ # The super dispatch is done in the forward.
512
+ raise ValueError(
513
+ "PyTorch SDPA does not support head_mask. Please open an issue in Transformers repository."
514
+ )
515
+
516
+ scale = None
517
+ if not self.scale_attn_weights:
518
+ scale = 1
519
+
520
+ # MQA models: (batch_size, query_length, num_heads * head_dim)
521
+ # MHA models: (batch_size, num_heads, query_length, head_dim)
522
+ query_shape = query.shape
523
+ batch_size = query_shape[0]
524
+ key.shape[-2]
525
+
526
+ if self.multi_query:
527
+ query_length = query_shape[1]
528
+
529
+ # SDPA requires the dimension [..., sequence_length, head_dim].
530
+ query = query.view(batch_size, query_length, self.num_heads, self.head_dim).transpose(1, 2)
531
+
532
+ # Without these unsqueeze, SDPA complains as the query and key/value have a different number of dimensions.
533
+ key = key.unsqueeze(1)
534
+ value = value.unsqueeze(1)
535
+
536
+ # Although these expands are not numerically useful, PyTorch cannot dispatch to the memory-efficient backend
537
+ # or the flash attention backend (No available kernel. Aborting execution.) from the shapes
538
+ # query = [batch_size, num_heads, query_length, head_dim]
539
+ # key = [batch_size, 1, past_length, head_dim]
540
+ # value = [batch_size, 1, past_length, head_dim]
541
+ #
542
+ # torch==2.1.2 is bugged with non-contiguous inputs with custom attn_mask (https://github.com/pytorch/pytorch/issues/112577), hence the check.
543
+ if is_torch_greater_or_equal_than_2_2:
544
+ key = key.expand(-1, self.num_heads, -1, -1)
545
+ value = value.expand(-1, self.num_heads, -1, -1)
546
+ else:
547
+ query_length = query_shape[-1]
548
+
549
+ # See the comment above.
550
+ if query.device.type == "cuda" and attention_mask is not None:
551
+ query = query.contiguous()
552
+ key = key.contiguous()
553
+ value = value.contiguous()
554
+
555
+ sdpa_result = torch.nn.functional.scaled_dot_product_attention(
556
+ query,
557
+ key,
558
+ value,
559
+ attn_mask=attention_mask,
560
+ dropout_p=self.attn_pdrop if self.training else 0.0,
561
+ # The query_length > 1 is necessary to match with AttentionMaskConverter.to_causal_4d that does not create a causal mask in case query_length == 1.
562
+ is_causal=self.is_causal and attention_mask is None and query_length > 1,
563
+ scale=scale,
564
+ )
565
+
566
+ if self.multi_query:
567
+ # (batch_size, num_heads, seq_len, head_dim) --> (batch_size, seq_len, num_heads, head_dim)
568
+ sdpa_result = sdpa_result.transpose(1, 2)
569
+
570
+ # Reshape is kind of expensive here, as it does a memory copy,
571
+ # but I did not manage to do without it (logits do not match when using view).
572
+ # (batch_size, seq_len, num_heads, head_dim) --> (batch_size, seq_len, num_heads * head_dim)
573
+ sdpa_result = sdpa_result.reshape(query_shape)
574
+
575
+ return sdpa_result, None
576
+
577
+ def forward(
578
+ self,
579
+ hidden_states: torch.Tensor,
580
+ layer_past: Optional[torch.Tensor] = None,
581
+ attention_mask: Optional[torch.Tensor] = None,
582
+ head_mask: Optional[torch.Tensor] = None,
583
+ encoder_hidden_states: Optional[torch.Tensor] = None,
584
+ encoder_attention_mask: Optional[torch.Tensor] = None,
585
+ use_cache: Optional[bool] = False,
586
+ output_attentions: Optional[bool] = False,
587
+ ) -> Union[
588
+ Tuple[torch.Tensor, Optional[torch.Tensor]],
589
+ Tuple[torch.Tensor, Optional[torch.Tensor], Tuple[torch.Tensor, ...]],
590
+ ]:
591
+ if encoder_hidden_states is not None:
592
+ if not hasattr(self, "q_attn") or not self.is_cross_attention:
593
+ raise ValueError(
594
+ "If class is used as cross attention, the weights `q_attn` have to be defined. "
595
+ "Please make sure to instantiate class with `GPTBigCodeAttention(..., is_cross_attention=True)`."
596
+ )
597
+
598
+ query = self.q_attn(hidden_states)
599
+ key_value = self.c_attn(encoder_hidden_states)
600
+ attention_mask = encoder_attention_mask
601
+ elif self.multi_query:
602
+ query, key_value = self.c_attn(hidden_states).split((self.embed_dim, 2 * self.kv_dim), dim=2)
603
+ else:
604
+ # Note: We split as (self.num_heads, 3, self.head_dim) instead of (3, self.num_heads, self.head_dim),
605
+ # i.e., the memory layout is not the same as GPT2.
606
+ # This makes the concatenation with past_key_value more efficient.
607
+ query, key_value = (
608
+ self.c_attn(hidden_states)
609
+ .view(*hidden_states.shape[:2], self.num_heads, 3 * self.head_dim)
610
+ .transpose(1, 2)
611
+ .split((self.head_dim, 2 * self.head_dim), dim=3)
612
+ )
613
+
614
+ if layer_past is not None:
615
+ key_value = torch.cat((layer_past, key_value), dim=-2)
616
+ present = key_value if use_cache else None
617
+
618
+ key, value = key_value.split((self.head_dim, self.head_dim), dim=-1)
619
+
620
+ if not output_attentions and head_mask is None:
621
+ # Difference with the original implementation: there is no need to transpose the key here,
622
+ # as SDPA expects seq_length to be at index -2 for the key as well
623
+ attn_output, attn_weights = self._attn(query, key, value, attention_mask, head_mask)
624
+ else:
625
+ # TODO: Improve this warning with e.g. `model.config._attn_implementation = "manual"` once this is implemented.
626
+ logger.warning_once(
627
+ "GPTBigCodeModel is using GPTBigCodeSdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True` and `head_mask` not None."
628
+ ' Falling back to the manual attention implementation, but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
629
+ )
630
+ attn_output, attn_weights = super()._attn(query, key.transpose(-1, -2), value, attention_mask, head_mask)
631
+
632
+ if not self.multi_query:
633
+ attn_output = attn_output.transpose(1, 2).reshape(hidden_states.shape)
634
+ attn_output = self.c_proj(attn_output)
635
+ attn_output = self.resid_dropout(attn_output)
636
+
637
+ outputs = (attn_output, present)
638
+ if output_attentions:
639
+ if self.multi_query:
640
+ # Transpose to return weights in the usual format (batch_size, num_heads, query_length, key_length)
641
+ attn_weights = attn_weights.transpose(1, 2)
642
+ outputs += (attn_weights,)
643
+
644
+ return outputs
645
+
646
+
647
+ class GPTBigCodeMLP(nn.Module):
648
+ def __init__(self, intermediate_size, config):
649
+ super().__init__()
650
+ embed_dim = config.hidden_size
651
+ self.c_fc = nn.Linear(embed_dim, intermediate_size)
652
+ self.c_proj = nn.Linear(intermediate_size, embed_dim)
653
+ self.act = ACT2FN[config.activation_function]
654
+ self.dropout = nn.Dropout(config.resid_pdrop)
655
+
656
+ # Copied from transformers.models.gpt2.modeling_gpt2.GPT2MLP.forward
657
+ def forward(self, hidden_states: Optional[Tuple[torch.FloatTensor]]) -> torch.FloatTensor:
658
+ hidden_states = self.c_fc(hidden_states)
659
+ hidden_states = self.act(hidden_states)
660
+ hidden_states = self.c_proj(hidden_states)
661
+ hidden_states = self.dropout(hidden_states)
662
+ return hidden_states
663
+
664
+
665
+ GPTBIGCODE_ATTENTION_CLASSES = {
666
+ "eager": GPTBigCodeAttention,
667
+ "flash_attention_2": GPTBigCodeFlashAttention2,
668
+ "sdpa": GPTBigCodeSdpaAttention,
669
+ }
670
+
671
+
672
+ class GPTBigCodeBlock(nn.Module):
673
+ def __init__(self, config, layer_idx=None):
674
+ super().__init__()
675
+ hidden_size = config.hidden_size
676
+ self.inner_dim = config.n_inner if config.n_inner is not None else 4 * hidden_size
677
+
678
+ self.ln_1 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
679
+
680
+ self.attn = GPTBIGCODE_ATTENTION_CLASSES[config._attn_implementation](config, layer_idx=layer_idx)
681
+
682
+ self.ln_2 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
683
+
684
+ if config.add_cross_attention:
685
+ if config.multi_query:
686
+ raise NotImplementedError("Cross-attention not implemented for MQA")
687
+
688
+ self.crossattention = GPTBIGCODE_ATTENTION_CLASSES[config._attn_implementation](
689
+ config, is_cross_attention=True, layer_idx=layer_idx
690
+ )
691
+
692
+ self.ln_cross_attn = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
693
+
694
+ self.mlp = GPTBigCodeMLP(self.inner_dim, config)
695
+
696
+ def forward(
697
+ self,
698
+ hidden_states: Optional[Tuple[torch.Tensor]],
699
+ layer_past: Optional[torch.Tensor] = None,
700
+ attention_mask: Optional[torch.Tensor] = None,
701
+ head_mask: Optional[torch.Tensor] = None,
702
+ encoder_hidden_states: Optional[torch.Tensor] = None,
703
+ encoder_attention_mask: Optional[torch.Tensor] = None,
704
+ use_cache: Optional[bool] = False,
705
+ output_attentions: Optional[bool] = False,
706
+ ) -> Union[
707
+ Tuple[torch.Tensor], Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor, torch.Tensor, torch.Tensor]
708
+ ]:
709
+ residual = hidden_states
710
+ hidden_states = self.ln_1(hidden_states)
711
+ attn_outputs = self.attn(
712
+ hidden_states,
713
+ layer_past=layer_past,
714
+ attention_mask=attention_mask,
715
+ head_mask=head_mask,
716
+ use_cache=use_cache,
717
+ output_attentions=output_attentions,
718
+ )
719
+ attn_output = attn_outputs[0] # output_attn: a, present, (attentions)
720
+ outputs = attn_outputs[1:]
721
+ # residual connection
722
+ hidden_states = attn_output + residual
723
+
724
+ if encoder_hidden_states is not None:
725
+ # add one self-attention block for cross-attention
726
+ if not hasattr(self, "crossattention"):
727
+ raise ValueError(
728
+ f"If `encoder_hidden_states` are passed, {self} has to be instantiated with "
729
+ "cross-attention layers by setting `config.add_cross_attention=True`"
730
+ )
731
+ residual = hidden_states
732
+ hidden_states = self.ln_cross_attn(hidden_states)
733
+ cross_attn_outputs = self.crossattention(
734
+ hidden_states,
735
+ attention_mask=attention_mask,
736
+ head_mask=head_mask,
737
+ encoder_hidden_states=encoder_hidden_states,
738
+ encoder_attention_mask=encoder_attention_mask,
739
+ output_attentions=output_attentions,
740
+ )
741
+ attn_output = cross_attn_outputs[0]
742
+ # residual connection
743
+ hidden_states = residual + attn_output
744
+ outputs = outputs + cross_attn_outputs[2:] # add cross attentions if we output attention weights
745
+
746
+ residual = hidden_states
747
+ hidden_states = self.ln_2(hidden_states)
748
+ feed_forward_hidden_states = self.mlp(hidden_states)
749
+ # residual connection
750
+ hidden_states = residual + feed_forward_hidden_states
751
+
752
+ if use_cache:
753
+ outputs = (hidden_states,) + outputs
754
+ else:
755
+ outputs = (hidden_states,) + outputs[1:]
756
+
757
+ return outputs # hidden_states, present, (attentions, cross_attentions)
758
+
759
+
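Editorial note: schematically (not part of the original file), each block above is a pre-LayerNorm residual stack, with the attention flavour picked from `GPTBIGCODE_ATTENTION_CLASSES` by `config._attn_implementation`:

# x = x + attn(ln_1(x))   # plus an optional cross-attention sub-block on ln_cross_attn(x)
# x = x + mlp(ln_2(x))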
760
+ class GPTBigCodePreTrainedModel(PreTrainedModel):
761
+ """
762
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
763
+ models.
764
+ """
765
+
766
+ config_class = GPTBigCodeConfig
767
+ base_model_prefix = "transformer"
768
+ supports_gradient_checkpointing = True
769
+ _no_split_modules = ["GPTBigCodeBlock"]
770
+ _skip_keys_device_placement = "past_key_values"
771
+ _supports_flash_attn_2 = True
772
+ _supports_sdpa = True
773
+
774
+ def __init__(self, *inputs, **kwargs):
775
+ super().__init__(*inputs, **kwargs)
776
+
777
+ def _init_weights(self, module):
778
+ """Initialize the weights."""
779
+ if isinstance(module, (GPTBigCodeMLP, GPTBigCodeAttention)):
780
+ # Reinitialize selected weights subject to the OpenAI GPT-2 Paper Scheme:
781
+ # > A modified initialization which accounts for the accumulation on the residual path with model depth. Scale
782
+ # > the weights of residual layers at initialization by a factor of 1/√N where N is the # of residual layers.
783
+ # > -- GPT-2 :: https://openai.com/blog/better-language-models/
784
+ #
785
+ # Reference (Megatron-LM): https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/gpt_model.py
786
+ module.c_proj.weight.data.normal_(
787
+ mean=0.0, std=(self.config.initializer_range / math.sqrt(2 * self.config.n_layer))
788
+ )
789
+ module.c_proj._is_hf_initialized = True
790
+ elif isinstance(module, nn.Linear):
791
+ # Slightly different from the TF version which uses truncated_normal for initialization
792
+ # cf https://github.com/pytorch/pytorch/pull/5617
793
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
794
+ if module.bias is not None:
795
+ module.bias.data.zero_()
796
+ elif isinstance(module, nn.Embedding):
797
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
798
+ if module.padding_idx is not None:
799
+ module.weight.data[module.padding_idx].zero_()
800
+ elif isinstance(module, nn.LayerNorm):
801
+ module.bias.data.zero_()
802
+ module.weight.data.fill_(1.0)
803
+
804
+
805
+ GPT_BIGCODE_START_DOCSTRING = r"""
806
+
807
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
808
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
809
+ etc.)
810
+
811
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
812
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
813
+ and behavior.
814
+
815
+ Parameters:
816
+ config ([`GPTBigCodeConfig`]): Model configuration class with all the parameters of the model.
817
+ Initializing with a config file does not load the weights associated with the model, only the
818
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
819
+ """
820
+
821
+ GPT_BIGCODE_INPUTS_DOCSTRING = r"""
822
+ Args:
823
+ input_ids (`torch.Tensor` of shape `(batch_size, input_ids_length)`):
824
+ `input_ids_length` = `sequence_length` if `past_key_values` is `None` else
825
+ `past_key_values[0][0].shape[-2]` (`sequence_length` of input past key value states). Indices of input
826
+ sequence tokens in the vocabulary.
827
+
828
+ If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as
829
+ `input_ids`.
830
+
831
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
832
+ [`PreTrainedTokenizer.__call__`] for details.
833
+
834
+ [What are input IDs?](../glossary#input-ids)
835
+ past_key_values (`Tuple[torch.Tensor]` of length `config.n_layers`):
836
+ Contains precomputed hidden-states (key and values in the attention blocks) as computed by the model (see
837
+ `past_key_values` output below). Can be used to speed up sequential decoding. The `input_ids` which have
838
+ their past given to this model should not be passed as `input_ids` as they have already been computed.
839
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
840
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
841
+
842
+ - 1 for tokens that are **not masked**,
843
+ - 0 for tokens that are **masked**.
844
+
845
+ If `past_key_values` is used, `attention_mask` needs to contain the masking strategy that was used for
846
+ `past_key_values`. In other words, the `attention_mask` always has to have the length:
847
+ `len(past_key_values) + len(input_ids)`
848
+
849
+ [What are attention masks?](../glossary#attention-mask)
850
+ token_type_ids (`torch.Tensor` of shape `(batch_size, input_ids_length)`, *optional*):
851
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
852
+ 1]`:
853
+
854
+ - 0 corresponds to a *sentence A* token,
855
+ - 1 corresponds to a *sentence B* token.
856
+
857
+ [What are token type IDs?](../glossary#token-type-ids)
858
+ position_ids (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
859
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
860
+ config.max_position_embeddings - 1]`.
861
+
862
+ [What are position IDs?](../glossary#position-ids)
863
+ head_mask (`torch.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
864
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
865
+
866
+ - 1 indicates the head is **not masked**,
867
+ - 0 indicates the head is **masked**.
868
+
869
+ inputs_embeds (`torch.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
870
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
871
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
872
+ model's internal embedding lookup matrix.
873
+
874
+ If `past_key_values` is used, optionally only the last `inputs_embeds` have to be input (see
875
+ `past_key_values`).
876
+ use_cache (`bool`, *optional*):
877
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
878
+ `past_key_values`).
879
+ output_attentions (`bool`, *optional*):
880
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
881
+ tensors for more detail.
882
+ output_hidden_states (`bool`, *optional*):
883
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
884
+ more detail.
885
+ return_dict (`bool`, *optional*):
886
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
887
+ """
888
+
889
+
890
+ @add_start_docstrings(
891
+ "The bare GPT_BIGCODE Model transformer outputting raw hidden-states without any specific head on top.",
892
+ GPT_BIGCODE_START_DOCSTRING,
893
+ )
894
+ class GPTBigCodeModel(GPTBigCodePreTrainedModel):
895
+ def __init__(self, config):
896
+ super().__init__(config)
897
+ self.multi_query = config.multi_query
898
+ self.embed_dim = config.hidden_size
899
+
900
+ self.wte = nn.Embedding(config.vocab_size, self.embed_dim)
901
+ self.wpe = nn.Embedding(config.max_position_embeddings, self.embed_dim)
902
+
903
+ self.drop = nn.Dropout(config.embd_pdrop)
904
+ self.h = nn.ModuleList([GPTBigCodeBlock(config, layer_idx=i) for i in range(config.num_hidden_layers)])
905
+ self.ln_f = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon)
906
+
907
+ max_positions = config.max_position_embeddings
908
+ self.register_buffer(
909
+ "bias", torch.tril(torch.ones((max_positions, max_positions), dtype=torch.bool)), persistent=False
910
+ )
911
+
912
+ self.gradient_checkpointing = False
913
+
914
+ self._use_sdpa = config._attn_implementation == "sdpa"
915
+ self._use_flash_attention_2 = config._attn_implementation == "flash_attention_2"
916
+
917
+ # Initialize weights and apply final processing
918
+ self.post_init()
919
+
920
+ def get_input_embeddings(self):
921
+ return self.wte
922
+
923
+ def set_input_embeddings(self, new_embeddings):
924
+ self.wte = new_embeddings
925
+
926
+ @add_start_docstrings_to_model_forward(GPT_BIGCODE_INPUTS_DOCSTRING)
927
+ @add_code_sample_docstrings(
928
+ checkpoint=_CHECKPOINT_FOR_DOC,
929
+ output_type=BaseModelOutputWithPastAndCrossAttentions,
930
+ config_class=_CONFIG_FOR_DOC,
931
+ )
932
+ def forward(
933
+ self,
934
+ input_ids: Optional[torch.Tensor] = None,
935
+ past_key_values: Optional[List[torch.Tensor]] = None,
936
+ attention_mask: Optional[torch.Tensor] = None,
937
+ token_type_ids: Optional[torch.Tensor] = None,
938
+ position_ids: Optional[torch.Tensor] = None,
939
+ head_mask: Optional[torch.Tensor] = None,
940
+ inputs_embeds: Optional[torch.Tensor] = None,
941
+ encoder_hidden_states: Optional[torch.Tensor] = None,
942
+ encoder_attention_mask: Optional[torch.Tensor] = None,
943
+ use_cache: Optional[bool] = None,
944
+ output_attentions: Optional[bool] = None,
945
+ output_hidden_states: Optional[bool] = None,
946
+ return_dict: Optional[bool] = None,
947
+ ) -> Union[Tuple, BaseModelOutputWithPastAndCrossAttentions]:
948
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
949
+ output_hidden_states = (
950
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
951
+ )
952
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
953
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
954
+
955
+ if input_ids is not None and inputs_embeds is not None:
956
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
957
+ elif input_ids is not None:
958
+ self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
959
+ input_shape = input_ids.size()
960
+ input_ids = input_ids.view(-1, input_shape[-1])
961
+ batch_size = input_ids.shape[0]
962
+ elif inputs_embeds is not None:
963
+ input_shape = inputs_embeds.size()[:-1]
964
+ batch_size = inputs_embeds.shape[0]
965
+ else:
966
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
967
+
968
+ if batch_size <= 0:
969
+ raise ValueError("batch_size has to be defined and > 0")
970
+
971
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
972
+
973
+ if token_type_ids is not None:
974
+ token_type_ids = token_type_ids.view(-1, input_shape[-1])
975
+
976
+ if past_key_values is None:
977
+ past_length = 0
978
+ past_key_values = tuple([None] * len(self.h))
979
+ else:
980
+ past_length = past_key_values[0].size(-2)
981
+
982
+ if attention_mask is not None and len(attention_mask.shape) == 2 and position_ids is None:
983
+ # create position_ids on the fly for batch generation
984
+ position_ids = attention_mask.long().cumsum(-1) - 1
985
+ position_ids.masked_fill_(attention_mask == 0, 1)
986
+ if past_length > 0:
987
+ position_ids = position_ids[:, past_length : input_shape[-1] + past_length :]
988
+ elif position_ids is None:
989
+ position_ids = torch.arange(past_length, input_shape[-1] + past_length, dtype=torch.long, device=device)
990
+ position_ids = position_ids.unsqueeze(0)
991
+
992
+ # Self-attention mask.
993
+ query_length = input_shape[-1]
994
+ key_length = past_length + query_length
995
+ self_attention_mask = self.bias[None, key_length - query_length : key_length, :key_length]
996
+
997
+ if self._use_flash_attention_2:
998
+ # 2d mask is passed through the layers
999
+ attention_mask = attention_mask.bool() if (attention_mask is not None and 0 in attention_mask) else None
1000
+ encoder_attention_mask = (
1001
+ encoder_attention_mask.bool()
1002
+ if (encoder_attention_mask is not None and 0 in encoder_attention_mask)
1003
+ else None
1004
+ )
1005
+ else:
1006
+ # 4d mask is passed through the layers
1007
+ if attention_mask is not None:
1008
+ self_attention_mask = self_attention_mask * attention_mask.view(batch_size, 1, -1).to(
1009
+ dtype=torch.bool, device=self_attention_mask.device
1010
+ )
1011
+
1012
+ # MQA models: (batch_size, query_length, n_heads, key_length)
1013
+ # MHA models: (batch_size, n_heads, query_length, key_length)
1014
+ self_attention_mask = self_attention_mask.unsqueeze(2 if self.multi_query else 1)
1015
+
1016
+ if self._use_sdpa and head_mask is None and not output_attentions:
1017
+ # SDPA with a custom mask is much faster in fp16/fp32 dtype rather than bool. Cast here to floating point instead of at every layer.
1018
+ dtype = self.wte.weight.dtype
1019
+ min_dtype = torch.finfo(dtype).min
1020
+ self_attention_mask = torch.where(
1021
+ self_attention_mask,
1022
+ torch.full([], 0.0, dtype=dtype, device=self_attention_mask.device),
1023
+ torch.full([], min_dtype, dtype=dtype, device=self_attention_mask.device),
1024
+ )
1025
+
1026
+ # output_attentions=True can not be supported when using SDPA, and we fall back on
1027
+ # the manual implementation that requires a 4D causal mask in all cases.
1028
+ if self.multi_query:
1029
+ # gpt_bigcode using MQA has the bad taste to use a causal mask with shape
1030
+ # [batch_size, target_length, 1, source_length], not compatible with SDPA, hence this transpose.
1031
+ self_attention_mask = self_attention_mask.transpose(1, 2)
1032
+
1033
+ if query_length > 1 and attention_mask is not None and attention_mask.device.type == "cuda":
1034
+ # From PyTorch 2.1 onwards, F.scaled_dot_product_attention with the memory-efficient attention backend
1035
+ # produces nans if sequences are completely unattended in the attention mask. Details: https://github.com/pytorch/pytorch/issues/110213
1036
+ self_attention_mask = AttentionMaskConverter._unmask_unattended(
1037
+ self_attention_mask, min_dtype=min_dtype
1038
+ )
1039
+
1040
+ attention_mask = self_attention_mask
1041
+
1042
+ # If a 2D or 3D attention mask is provided for the cross-attention
1043
+ # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
1044
+ if (
1045
+ self.config.add_cross_attention
1046
+ and encoder_hidden_states is not None
1047
+ and encoder_attention_mask is not None
1048
+ ):
1049
+ if encoder_attention_mask.dim() == 2:
1050
+ encoder_attention_mask.unsqueeze(1)
1051
+ assert encoder_attention_mask.dim() == 3
1052
+ encoder_attention_mask = encoder_attention_mask.bool().unsqueeze(2 if self.multi_query else 1)
1053
+ else:
1054
+ encoder_attention_mask = None
1055
+
1056
+ # Prepare head mask if needed
1057
+ # 1.0 in head_mask indicate we keep the head
1058
+ # attention_probs has shape bsz x n_heads x N x N
1059
+ # head_mask has shape n_layer x batch x n_heads x N x N
1060
+ head_mask = self.get_head_mask(head_mask, self.config.n_layer)
1061
+
1062
+ if inputs_embeds is None:
1063
+ inputs_embeds = self.wte(input_ids)
1064
+ position_embeds = self.wpe(position_ids)
1065
+ hidden_states = inputs_embeds + position_embeds
1066
+
1067
+ if token_type_ids is not None:
1068
+ token_type_embeds = self.wte(token_type_ids)
1069
+ hidden_states = hidden_states + token_type_embeds
1070
+
1071
+ hidden_states = self.drop(hidden_states)
1072
+
1073
+ output_shape = input_shape + (hidden_states.size(-1),)
1074
+
1075
+ presents = [] if use_cache else None
1076
+ all_self_attentions = () if output_attentions else None
1077
+ all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
1078
+ all_hidden_states = () if output_hidden_states else None
1079
+ for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)):
1080
+ if output_hidden_states:
1081
+ all_hidden_states = all_hidden_states + (hidden_states,)
1082
+
1083
+ if self.gradient_checkpointing and self.training:
1084
+ outputs = self._gradient_checkpointing_func(
1085
+ block.__call__,
1086
+ hidden_states,
1087
+ None,
1088
+ attention_mask,
1089
+ head_mask[i],
1090
+ encoder_hidden_states,
1091
+ encoder_attention_mask,
1092
+ use_cache,
1093
+ output_attentions,
1094
+ )
1095
+ else:
1096
+ outputs = block(
1097
+ hidden_states,
1098
+ layer_past=layer_past,
1099
+ attention_mask=attention_mask,
1100
+ head_mask=head_mask[i],
1101
+ encoder_hidden_states=encoder_hidden_states,
1102
+ encoder_attention_mask=encoder_attention_mask,
1103
+ use_cache=use_cache,
1104
+ output_attentions=output_attentions,
1105
+ )
1106
+
1107
+ hidden_states = outputs[0]
1108
+ if use_cache:
1109
+ presents.append(outputs[1])
1110
+
1111
+ if output_attentions:
1112
+ all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],)
1113
+ if self.config.add_cross_attention:
1114
+ all_cross_attentions = all_cross_attentions + (outputs[3 if use_cache else 2],)
1115
+
1116
+ hidden_states = self.ln_f(hidden_states)
1117
+
1118
+ hidden_states = hidden_states.view(output_shape)
1119
+ # Add last hidden state
1120
+ if output_hidden_states:
1121
+ all_hidden_states = all_hidden_states + (hidden_states,)
1122
+
1123
+ if not return_dict:
1124
+ return tuple(
1125
+ v
1126
+ for v in [hidden_states, presents, all_hidden_states, all_self_attentions, all_cross_attentions]
1127
+ if v is not None
1128
+ )
1129
+
1130
+ return BaseModelOutputWithPastAndCrossAttentions(
1131
+ last_hidden_state=hidden_states,
1132
+ past_key_values=presents,
1133
+ hidden_states=all_hidden_states,
1134
+ attentions=all_self_attentions,
1135
+ cross_attentions=all_cross_attentions,
1136
+ )
1137
+
1138
+
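Editorial note: a minimal usage sketch (assumed, not part of the original file) of the bare model against the documented checkpoint `bigcode/gpt_bigcode-santacoder`:

from transformers import AutoTokenizer, GPTBigCodeModel

tokenizer = AutoTokenizer.from_pretrained("bigcode/gpt_bigcode-santacoder")
model = GPTBigCodeModel.from_pretrained("bigcode/gpt_bigcode-santacoder")

inputs = tokenizer("def fibonacci(n):", return_tensors="pt")
outputs = model(**inputs)
print(outputs.last_hidden_state.shape)  # (1, sequence_length, config.hidden_size)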
1139
+ @add_start_docstrings(
1140
+ """
1141
+ The GPT_BIGCODE Model transformer with a language modeling head on top (linear layer with weights tied to the input
1142
+ embeddings).
1143
+ """,
1144
+ GPT_BIGCODE_START_DOCSTRING,
1145
+ )
1146
+ class GPTBigCodeForCausalLM(GPTBigCodePreTrainedModel):
1147
+ _tied_weights_keys = ["lm_head.weight"]
1148
+
1149
+ def __init__(self, config):
1150
+ super().__init__(config)
1151
+ self.transformer = GPTBigCodeModel(config)
1152
+ self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
1153
+
1154
+ # Initialize weights and apply final processing
1155
+ self.post_init()
1156
+
1157
+ def get_output_embeddings(self):
1158
+ return self.lm_head
1159
+
1160
+ def set_output_embeddings(self, new_embeddings):
1161
+ self.lm_head = new_embeddings
1162
+
1163
+ def prepare_inputs_for_generation(self, input_ids, past_key_values=None, inputs_embeds=None, **kwargs):
1164
+ token_type_ids = kwargs.get("token_type_ids", None)
1165
+ # Omit tokens covered by past_key_values
1166
+ if past_key_values:
1167
+ if self.config.multi_query:
1168
+ past_length = past_key_values[0].shape[1]
1169
+ else:
1170
+ past_length = past_key_values[0].shape[2]
1171
+
1172
+ # Some generation methods already pass only the last input ID
1173
+ if input_ids.shape[1] > past_length:
1174
+ remove_prefix_length = past_length
1175
+ else:
1176
+ # Default to old behavior: keep only final ID
1177
+ remove_prefix_length = input_ids.shape[1] - 1
1178
+
1179
+ input_ids = input_ids[:, remove_prefix_length:]
1180
+ if token_type_ids is not None:
1181
+ token_type_ids = token_type_ids[:, -input_ids.shape[1] :]
1182
+
1183
+ attention_mask = kwargs.get("attention_mask", None)
1184
+ position_ids = kwargs.get("position_ids", None)
1185
+
1186
+ if attention_mask is not None and position_ids is None:
1187
+ # create position_ids on the fly for batch generation
1188
+ position_ids = attention_mask.long().cumsum(-1) - 1
1189
+ position_ids.masked_fill_(attention_mask == 0, 1)
1190
+ if past_key_values:
1191
+ position_ids = position_ids[:, -input_ids.shape[1] :]
1192
+ else:
1193
+ position_ids = None
1194
+
1195
+ # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
1196
+ if inputs_embeds is not None and past_key_values is None:
1197
+ model_inputs = {"inputs_embeds": inputs_embeds}
1198
+ else:
1199
+ model_inputs = {"input_ids": input_ids}
1200
+
1201
+ model_inputs.update(
1202
+ {
1203
+ "past_key_values": past_key_values,
1204
+ "use_cache": kwargs.get("use_cache"),
1205
+ "position_ids": position_ids,
1206
+ "attention_mask": attention_mask,
1207
+ "token_type_ids": token_type_ids,
1208
+ }
1209
+ )
1210
+ return model_inputs
1211
+
1212
+ @add_start_docstrings_to_model_forward(GPT_BIGCODE_INPUTS_DOCSTRING)
1213
+ @add_code_sample_docstrings(
1214
+ checkpoint=_CHECKPOINT_FOR_DOC,
1215
+ output_type=CausalLMOutputWithCrossAttentions,
1216
+ config_class=_CONFIG_FOR_DOC,
1217
+ )
1218
+ def forward(
1219
+ self,
1220
+ input_ids: Optional[torch.Tensor] = None,
1221
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
1222
+ attention_mask: Optional[torch.Tensor] = None,
1223
+ token_type_ids: Optional[torch.Tensor] = None,
1224
+ position_ids: Optional[torch.Tensor] = None,
1225
+ head_mask: Optional[torch.Tensor] = None,
1226
+ inputs_embeds: Optional[torch.Tensor] = None,
1227
+ encoder_hidden_states: Optional[torch.Tensor] = None,
1228
+ encoder_attention_mask: Optional[torch.Tensor] = None,
1229
+ labels: Optional[torch.Tensor] = None,
1230
+ use_cache: Optional[bool] = None,
1231
+ output_attentions: Optional[bool] = None,
1232
+ output_hidden_states: Optional[bool] = None,
1233
+ return_dict: Optional[bool] = None,
1234
+ ) -> Union[Tuple, CausalLMOutputWithCrossAttentions]:
1235
+ r"""
1236
+ labels (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
1237
+ Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
1238
+ `labels = input_ids`. Indices are selected in `[-100, 0, ..., config.vocab_size]`. All labels set to `-100`
1239
+ are ignored (masked); the loss is only computed for labels in `[0, ..., config.vocab_size]`.
1240
+ """
1241
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1242
+
1243
+ transformer_outputs = self.transformer(
1244
+ input_ids,
1245
+ past_key_values=past_key_values,
1246
+ attention_mask=attention_mask,
1247
+ token_type_ids=token_type_ids,
1248
+ position_ids=position_ids,
1249
+ head_mask=head_mask,
1250
+ inputs_embeds=inputs_embeds,
1251
+ encoder_hidden_states=encoder_hidden_states,
1252
+ encoder_attention_mask=encoder_attention_mask,
1253
+ use_cache=use_cache,
1254
+ output_attentions=output_attentions,
1255
+ output_hidden_states=output_hidden_states,
1256
+ return_dict=return_dict,
1257
+ )
1258
+ hidden_states = transformer_outputs[0]
1259
+
1260
+ lm_logits = self.lm_head(hidden_states)
1261
+
1262
+ loss = None
1263
+ if labels is not None:
1264
+ # Shift so that tokens < n predict n
1265
+ shift_logits = lm_logits[..., :-1, :].contiguous()
1266
+ shift_labels = labels[..., 1:].contiguous().to(shift_logits.device)
1267
+ # Flatten the tokens
1268
+ loss_fct = CrossEntropyLoss()
1269
+ loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
1270
+
1271
+ if not return_dict:
1272
+ output = (lm_logits,) + transformer_outputs[1:]
1273
+ return ((loss,) + output) if loss is not None else output
1274
+
1275
+ return CausalLMOutputWithCrossAttentions(
1276
+ loss=loss,
1277
+ logits=lm_logits,
1278
+ past_key_values=transformer_outputs.past_key_values,
1279
+ hidden_states=transformer_outputs.hidden_states,
1280
+ attentions=transformer_outputs.attentions,
1281
+ cross_attentions=transformer_outputs.cross_attentions,
1282
+ )
1283
+
1284
+ @staticmethod
1285
+ def _reorder_cache(
1286
+ past_key_values: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor
1287
+ ) -> Tuple[Tuple[torch.Tensor]]:
1288
+ """
1289
+ This function is used to re-order the `past_key_values` cache if [`~PreTrainedModel.beam_search`] or
1290
+ [`~PreTrainedModel.beam_sample`] is called. This is required to match `past_key_values` with the correct
1291
+ beam_idx at every generation step.
1292
+ """
1293
+ return tuple(layer_past.index_select(0, beam_idx.to(layer_past.device)) for layer_past in past_key_values)
1294
+
1295
+
1296
+ @add_start_docstrings(
1297
+ """
1298
+ The GPTBigCode Model transformer with a sequence classification head on top (linear layer).
1299
+
1300
+ [`GPTBigCodeForSequenceClassification`] uses the last token in order to do the classification, as other causal
1301
+ models (e.g. GPT-1) do.
1302
+
1303
+ Since it does classification on the last token, it needs to know the position of the last token. If a
1304
+ `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
1305
+ no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
1306
+ padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
1307
+ each row of the batch).
1308
+ """,
1309
+ GPT_BIGCODE_START_DOCSTRING,
1310
+ )
1311
+ class GPTBigCodeForSequenceClassification(GPTBigCodePreTrainedModel):
1312
+ def __init__(self, config):
1313
+ super().__init__(config)
1314
+ self.num_labels = config.num_labels
1315
+ self.transformer = GPTBigCodeModel(config)
1316
+ self.score = nn.Linear(config.n_embd, self.num_labels, bias=False)
1317
+
1318
+ # Initialize weights and apply final processing
1319
+ self.post_init()
1320
+
1321
+ @add_start_docstrings_to_model_forward(GPT_BIGCODE_INPUTS_DOCSTRING)
1322
+ def forward(
1323
+ self,
1324
+ input_ids: Optional[torch.Tensor] = None,
1325
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
1326
+ attention_mask: Optional[torch.Tensor] = None,
1327
+ token_type_ids: Optional[torch.Tensor] = None,
1328
+ position_ids: Optional[torch.Tensor] = None,
1329
+ head_mask: Optional[torch.Tensor] = None,
1330
+ inputs_embeds: Optional[torch.Tensor] = None,
1331
+ labels: Optional[torch.Tensor] = None,
1332
+ use_cache: Optional[bool] = None,
1333
+ output_attentions: Optional[bool] = None,
1334
+ output_hidden_states: Optional[bool] = None,
1335
+ return_dict: Optional[bool] = None,
1336
+ ) -> Union[Tuple, SequenceClassifierOutputWithPast]:
1337
+ r"""
1338
+ labels (`torch.Tensor` of shape `(batch_size,)`, *optional*):
1339
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1340
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
1341
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1342
+ """
1343
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1344
+
1345
+ transformer_outputs = self.transformer(
1346
+ input_ids,
1347
+ past_key_values=past_key_values,
1348
+ attention_mask=attention_mask,
1349
+ token_type_ids=token_type_ids,
1350
+ position_ids=position_ids,
1351
+ head_mask=head_mask,
1352
+ inputs_embeds=inputs_embeds,
1353
+ use_cache=use_cache,
1354
+ output_attentions=output_attentions,
1355
+ output_hidden_states=output_hidden_states,
1356
+ return_dict=return_dict,
1357
+ )
1358
+ hidden_states = transformer_outputs[0]
1359
+ logits = self.score(hidden_states)
1360
+
1361
+ if input_ids is not None:
1362
+ batch_size, sequence_length = input_ids.shape[:2]
1363
+ else:
1364
+ batch_size, sequence_length = inputs_embeds.shape[:2]
1365
+
1366
+ assert (
1367
+ self.config.pad_token_id is not None or batch_size == 1
1368
+ ), "Cannot handle batch sizes > 1 if no padding token is defined."
1369
+ if self.config.pad_token_id is None:
1370
+ sequence_lengths = -1
1371
+ else:
1372
+ if input_ids is not None:
1373
+ # if no pad token found, use modulo instead of reverse indexing for ONNX compatibility
1374
+ sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1
1375
+ sequence_lengths = sequence_lengths % input_ids.shape[-1]
1376
+ sequence_lengths = sequence_lengths.to(logits.device)
1377
+ else:
1378
+ sequence_lengths = -1
1379
+ logger.warning(
1380
+ f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be "
1381
+ "unexpected if using padding tokens in conjunction with `inputs_embeds.`"
1382
+ )
1383
+
1384
+ pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths]
1385
+
1386
+ loss = None
1387
+ if labels is not None:
1388
+ labels = labels.to(logits.device)
1389
+
1390
+ if self.config.problem_type is None:
1391
+ if self.num_labels == 1:
1392
+ self.config.problem_type = "regression"
1393
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
1394
+ self.config.problem_type = "single_label_classification"
1395
+ else:
1396
+ self.config.problem_type = "multi_label_classification"
1397
+
1398
+ if self.config.problem_type == "regression":
1399
+ loss_fct = MSELoss()
1400
+ if self.num_labels == 1:
1401
+ loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
1402
+ else:
1403
+ loss = loss_fct(pooled_logits, labels)
1404
+ elif self.config.problem_type == "single_label_classification":
1405
+ loss_fct = CrossEntropyLoss()
1406
+ loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
1407
+ elif self.config.problem_type == "multi_label_classification":
1408
+ loss_fct = BCEWithLogitsLoss()
1409
+ loss = loss_fct(pooled_logits, labels)
1410
+ if not return_dict:
1411
+ output = (pooled_logits,) + transformer_outputs[1:]
1412
+ return ((loss,) + output) if loss is not None else output
1413
+
1414
+ return SequenceClassifierOutputWithPast(
1415
+ loss=loss,
1416
+ logits=pooled_logits,
1417
+ past_key_values=transformer_outputs.past_key_values,
1418
+ hidden_states=transformer_outputs.hidden_states,
1419
+ attentions=transformer_outputs.attentions,
1420
+ )
1421
+
1422
+
1423
+ @add_start_docstrings(
1424
+ """
1425
+ GPT_BIGCODE Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g.
1426
+ for Named-Entity-Recognition (NER) tasks.
1427
+ """,
1428
+ GPT_BIGCODE_START_DOCSTRING,
1429
+ )
1430
+ class GPTBigCodeForTokenClassification(GPTBigCodePreTrainedModel):
1431
+ def __init__(self, config):
1432
+ super().__init__(config)
1433
+ self.num_labels = config.num_labels
1434
+
1435
+ self.transformer = GPTBigCodeModel(config)
1436
+ if hasattr(config, "classifier_dropout") and config.classifier_dropout is not None:
1437
+ classifier_dropout = config.classifier_dropout
1438
+ elif hasattr(config, "hidden_dropout") and config.hidden_dropout is not None:
1439
+ classifier_dropout = config.hidden_dropout
1440
+ else:
1441
+ classifier_dropout = 0.1
1442
+ self.dropout = nn.Dropout(classifier_dropout)
1443
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
1444
+
1445
+ # Initialize weights and apply final processing
1446
+ self.post_init()
1447
+
1448
+ @add_start_docstrings_to_model_forward(GPT_BIGCODE_INPUTS_DOCSTRING)
1449
+ def forward(
1450
+ self,
1451
+ input_ids: Optional[torch.Tensor] = None,
1452
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
1453
+ attention_mask: Optional[torch.Tensor] = None,
1454
+ token_type_ids: Optional[torch.Tensor] = None,
1455
+ position_ids: Optional[torch.Tensor] = None,
1456
+ head_mask: Optional[torch.Tensor] = None,
1457
+ inputs_embeds: Optional[torch.Tensor] = None,
1458
+ labels: Optional[torch.Tensor] = None,
1459
+ use_cache: Optional[bool] = None,
1460
+ output_attentions: Optional[bool] = None,
1461
+ output_hidden_states: Optional[bool] = None,
1462
+ return_dict: Optional[bool] = None,
1463
+ ) -> Union[Tuple, TokenClassifierOutput]:
1464
+ r"""
1465
+ labels (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
1466
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1467
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
1468
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1469
+ """
1470
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1471
+
1472
+ transformer_outputs = self.transformer(
1473
+ input_ids,
1474
+ past_key_values=past_key_values,
1475
+ attention_mask=attention_mask,
1476
+ token_type_ids=token_type_ids,
1477
+ position_ids=position_ids,
1478
+ head_mask=head_mask,
1479
+ inputs_embeds=inputs_embeds,
1480
+ use_cache=use_cache,
1481
+ output_attentions=output_attentions,
1482
+ output_hidden_states=output_hidden_states,
1483
+ return_dict=return_dict,
1484
+ )
1485
+
1486
+ hidden_states = transformer_outputs[0]
1487
+ hidden_states = self.dropout(hidden_states)
1488
+ logits = self.classifier(hidden_states)
1489
+
1490
+ loss = None
1491
+ if labels is not None:
1492
+ loss_fct = CrossEntropyLoss()
1493
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1).to(logits.device))
1494
+
1495
+ if not return_dict:
1496
+ output = (logits,) + transformer_outputs[2:]
1497
+ return ((loss,) + output) if loss is not None else output
1498
+
1499
+ return TokenClassifierOutput(
1500
+ loss=loss,
1501
+ logits=logits,
1502
+ hidden_states=transformer_outputs.hidden_states,
1503
+ attentions=transformer_outputs.attentions,
1504
+ )
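
The `prepare_inputs_for_generation` logic above keys off the cache layout: with `multi_query=True` the per-layer cache is a single fused key/value tensor whose dimension 1 is the past sequence length, while the multi-head layout keeps the sequence length in dimension 2. A minimal sketch of that trimming rule, using dummy tensors with purely illustrative shapes rather than a real checkpoint:

```python
import torch

def past_length_from_cache(past_key_values, multi_query: bool) -> int:
    # Multi-query: one fused tensor per layer, assumed here as (batch, past_seq_len, 2 * head_dim),
    # so the length is dimension 1; multi-head: (batch, num_heads, past_seq_len, head_dim), dimension 2.
    first_layer = past_key_values[0]
    return first_layer.shape[1] if multi_query else first_layer.shape[2]

# Illustrative shapes only (not taken from a real model):
mq_cache = [torch.zeros(1, 7, 2 * 64)]    # multi_query layout
mha_cache = [torch.zeros(1, 12, 7, 64)]   # multi-head layout

input_ids = torch.arange(10).unsqueeze(0)  # 10 tokens seen so far
for cache, mq in [(mq_cache, True), (mha_cache, False)]:
    past = past_length_from_cache(cache, mq)
    # Keep only the tokens not yet covered by the cache (same rule as in the method above).
    remove_prefix_length = past if input_ids.shape[1] > past else input_ids.shape[1] - 1
    trimmed = input_ids[:, remove_prefix_length:]
    print(past, trimmed.shape)  # 7, torch.Size([1, 3]) for both layouts
```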
venv/lib/python3.10/site-packages/transformers/models/gptj/__init__.py ADDED
@@ -0,0 +1,112 @@
1
+ # Copyright 2021 The EleutherAI and HuggingFace Teams. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import (
17
+ OptionalDependencyNotAvailable,
18
+ _LazyModule,
19
+ is_flax_available,
20
+ is_tf_available,
21
+ is_torch_available,
22
+ )
23
+
24
+
25
+ _import_structure = {"configuration_gptj": ["GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTJConfig", "GPTJOnnxConfig"]}
26
+
27
+ try:
28
+ if not is_torch_available():
29
+ raise OptionalDependencyNotAvailable()
30
+ except OptionalDependencyNotAvailable:
31
+ pass
32
+ else:
33
+ _import_structure["modeling_gptj"] = [
34
+ "GPTJ_PRETRAINED_MODEL_ARCHIVE_LIST",
35
+ "GPTJForCausalLM",
36
+ "GPTJForQuestionAnswering",
37
+ "GPTJForSequenceClassification",
38
+ "GPTJModel",
39
+ "GPTJPreTrainedModel",
40
+ ]
41
+
42
+ try:
43
+ if not is_tf_available():
44
+ raise OptionalDependencyNotAvailable()
45
+ except OptionalDependencyNotAvailable:
46
+ pass
47
+ else:
48
+ _import_structure["modeling_tf_gptj"] = [
49
+ "TFGPTJForCausalLM",
50
+ "TFGPTJForQuestionAnswering",
51
+ "TFGPTJForSequenceClassification",
52
+ "TFGPTJModel",
53
+ "TFGPTJPreTrainedModel",
54
+ ]
55
+
56
+ try:
57
+ if not is_flax_available():
58
+ raise OptionalDependencyNotAvailable()
59
+ except OptionalDependencyNotAvailable:
60
+ pass
61
+ else:
62
+ _import_structure["modeling_flax_gptj"] = [
63
+ "FlaxGPTJForCausalLM",
64
+ "FlaxGPTJModel",
65
+ "FlaxGPTJPreTrainedModel",
66
+ ]
67
+
68
+
69
+ if TYPE_CHECKING:
70
+ from .configuration_gptj import GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTJConfig, GPTJOnnxConfig
71
+
72
+ try:
73
+ if not is_torch_available():
74
+ raise OptionalDependencyNotAvailable()
75
+ except OptionalDependencyNotAvailable:
76
+ pass
77
+ else:
78
+ from .modeling_gptj import (
79
+ GPTJ_PRETRAINED_MODEL_ARCHIVE_LIST,
80
+ GPTJForCausalLM,
81
+ GPTJForQuestionAnswering,
82
+ GPTJForSequenceClassification,
83
+ GPTJModel,
84
+ GPTJPreTrainedModel,
85
+ )
86
+
87
+ try:
88
+ if not is_tf_available():
89
+ raise OptionalDependencyNotAvailable()
90
+ except OptionalDependencyNotAvailable:
91
+ pass
92
+ else:
93
+ from .modeling_tf_gptj import (
94
+ TFGPTJForCausalLM,
95
+ TFGPTJForQuestionAnswering,
96
+ TFGPTJForSequenceClassification,
97
+ TFGPTJModel,
98
+ TFGPTJPreTrainedModel,
99
+ )
100
+
101
+ try:
102
+ if not is_flax_available():
103
+ raise OptionalDependencyNotAvailable()
104
+ except OptionalDependencyNotAvailable:
105
+ pass
106
+ else:
107
+ from .modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel, FlaxGPTJPreTrainedModel
108
+
109
+ else:
110
+ import sys
111
+
112
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
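
The `_LazyModule` registration above means that importing `transformers.models.gptj` does not immediately pull in the PyTorch, TensorFlow, or Flax modeling files; each exported name is resolved against `_import_structure` on first attribute access. A rough, simplified stand-in for that mechanism (this is not the real `_LazyModule` implementation, just a sketch of the idea):

```python
import importlib
import types

class LazyModule(types.ModuleType):
    """Simplified stand-in: maps attribute names to submodules and imports them lazily."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # e.g. {"configuration_gptj": ["GPTJConfig"], "modeling_gptj": ["GPTJModel"]}
        self._class_to_module = {
            cls: module for module, classes in import_structure.items() for cls in classes
        }

    def __getattr__(self, item):
        module_name = self._class_to_module.get(item)
        if module_name is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {item!r}")
        module = importlib.import_module(f"{self.__name__}.{module_name}")
        value = getattr(module, item)
        setattr(self, item, value)  # cache so the submodule import only happens once
        return value
```

In the real library the lazy module also handles the backend-availability checks (the `is_torch_available()` / `is_tf_available()` / `is_flax_available()` guards above), which this sketch omits.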
venv/lib/python3.10/site-packages/transformers/models/gptj/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.58 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/gptj/__pycache__/configuration_gptj.cpython-310.pyc ADDED
Binary file (7.66 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/gptj/__pycache__/modeling_flax_gptj.cpython-310.pyc ADDED
Binary file (21 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/gptj/__pycache__/modeling_gptj.cpython-310.pyc ADDED
Binary file (38.9 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/gptj/__pycache__/modeling_tf_gptj.cpython-310.pyc ADDED
Binary file (33.6 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/gptj/configuration_gptj.py ADDED
@@ -0,0 +1,218 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The EleutherAI and HuggingFace Teams. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ GPT-J model configuration"""
16
+ from collections import OrderedDict
17
+ from typing import Any, List, Mapping, Optional
18
+
19
+ from ... import PreTrainedTokenizer, TensorType, is_torch_available
20
+ from ...configuration_utils import PretrainedConfig
21
+ from ...onnx import OnnxConfigWithPast, PatchingSpec
22
+ from ...utils import logging
23
+
24
+
25
+ logger = logging.get_logger(__name__)
26
+
27
+
28
+ from ..deprecated._archive_maps import GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
29
+
30
+
31
+ class GPTJConfig(PretrainedConfig):
32
+ r"""
33
+ This is the configuration class to store the configuration of a [`GPTJModel`]. It is used to instantiate a GPT-J
34
+ model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
35
+ defaults will yield a similar configuration to that of the GPT-J
36
+ [EleutherAI/gpt-j-6B](https://huggingface.co/EleutherAI/gpt-j-6B) architecture. Configuration objects inherit from
37
+ [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`]
38
+ for more information.
39
+
40
+ Args:
41
+ vocab_size (`int`, *optional*, defaults to 50400):
42
+ Vocabulary size of the GPT-J model. Defines the number of different tokens that can be represented by the
43
+ `inputs_ids` passed when calling [`GPTJModel`].
44
+ n_positions (`int`, *optional*, defaults to 2048):
45
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
46
+ just in case (e.g., 512 or 1024 or 2048).
47
+ n_embd (`int`, *optional*, defaults to 4096):
48
+ Dimensionality of the embeddings and hidden states.
49
+ n_layer (`int`, *optional*, defaults to 28):
50
+ Number of hidden layers in the Transformer encoder.
51
+ n_head (`int`, *optional*, defaults to 16):
52
+ Number of attention heads for each attention layer in the Transformer encoder.
53
+ rotary_dim (`int`, *optional*, defaults to 64):
54
+ Number of dimensions in the embedding that Rotary Position Embedding is applied to.
55
+ n_inner (`int`, *optional*, defaults to None):
56
+ Dimensionality of the inner feed-forward layers. `None` will set it to 4 times `n_embd`.
57
+ activation_function (`str`, *optional*, defaults to `"gelu_new"`):
58
+ Activation function, to be selected in the list `["relu", "silu", "gelu", "tanh", "gelu_new"]`.
59
+ resid_pdrop (`float`, *optional*, defaults to 0.1):
60
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
61
+ embd_pdrop (`int`, *optional*, defaults to 0.1):
62
+ The dropout ratio for the embeddings.
63
+ attn_pdrop (`float`, *optional*, defaults to 0.1):
64
+ The dropout ratio for the attention.
65
+ layer_norm_epsilon (`float`, *optional*, defaults to 1e-5):
66
+ The epsilon to use in the layer normalization layers.
67
+ initializer_range (`float`, *optional*, defaults to 0.02):
68
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
69
+ use_cache (`bool`, *optional*, defaults to `True`):
70
+ Whether or not the model should return the last key/values attentions (not used by all models).
71
+
72
+ Example:
73
+
74
+ ```python
75
+ >>> from transformers import GPTJModel, GPTJConfig
76
+
77
+ >>> # Initializing a GPT-J 6B configuration
78
+ >>> configuration = GPTJConfig()
79
+
80
+ >>> # Initializing a model from the configuration
81
+ >>> model = GPTJModel(configuration)
82
+
83
+ >>> # Accessing the model configuration
84
+ >>> configuration = model.config
85
+ ```"""
86
+
87
+ model_type = "gptj"
88
+ attribute_map = {
89
+ "max_position_embeddings": "n_positions",
90
+ "hidden_size": "n_embd",
91
+ "num_attention_heads": "n_head",
92
+ "num_hidden_layers": "n_layer",
93
+ }
94
+
95
+ def __init__(
96
+ self,
97
+ vocab_size=50400,
98
+ n_positions=2048,
99
+ n_embd=4096,
100
+ n_layer=28,
101
+ n_head=16,
102
+ rotary_dim=64,
103
+ n_inner=None,
104
+ activation_function="gelu_new",
105
+ resid_pdrop=0.0,
106
+ embd_pdrop=0.0,
107
+ attn_pdrop=0.0,
108
+ layer_norm_epsilon=1e-5,
109
+ initializer_range=0.02,
110
+ use_cache=True,
111
+ bos_token_id=50256,
112
+ eos_token_id=50256,
113
+ tie_word_embeddings=False,
114
+ **kwargs,
115
+ ):
116
+ self.vocab_size = vocab_size
117
+ self.n_positions = n_positions
118
+ self.n_embd = n_embd
119
+ self.n_layer = n_layer
120
+ self.n_head = n_head
121
+ self.n_inner = n_inner
122
+ self.rotary_dim = rotary_dim
123
+ self.activation_function = activation_function
124
+ self.resid_pdrop = resid_pdrop
125
+ self.embd_pdrop = embd_pdrop
126
+ self.attn_pdrop = attn_pdrop
127
+ self.layer_norm_epsilon = layer_norm_epsilon
128
+ self.initializer_range = initializer_range
129
+ self.use_cache = use_cache
130
+
131
+ self.bos_token_id = bos_token_id
132
+ self.eos_token_id = eos_token_id
133
+
134
+ super().__init__(
135
+ bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
136
+ )
137
+
138
+
139
+ # Copied from transformers.models.gpt2.configuration_gpt2.GPT2OnnxConfig
140
+ class GPTJOnnxConfig(OnnxConfigWithPast):
141
+ def __init__(
142
+ self,
143
+ config: PretrainedConfig,
144
+ task: str = "default",
145
+ patching_specs: List[PatchingSpec] = None,
146
+ use_past: bool = False,
147
+ ):
148
+ super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
149
+ if not getattr(self._config, "pad_token_id", None):
150
+ # TODO: how to do that better?
151
+ self._config.pad_token_id = 0
152
+
153
+ @property
154
+ def inputs(self) -> Mapping[str, Mapping[int, str]]:
155
+ common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
156
+ if self.use_past:
157
+ self.fill_with_past_key_values_(common_inputs, direction="inputs")
158
+ common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
159
+ else:
160
+ common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
161
+
162
+ return common_inputs
163
+
164
+ @property
165
+ def num_layers(self) -> int:
166
+ return self._config.n_layer
167
+
168
+ @property
169
+ def num_attention_heads(self) -> int:
170
+ return self._config.n_head
171
+
172
+ def generate_dummy_inputs(
173
+ self,
174
+ tokenizer: PreTrainedTokenizer,
175
+ batch_size: int = -1,
176
+ seq_length: int = -1,
177
+ is_pair: bool = False,
178
+ framework: Optional[TensorType] = None,
179
+ ) -> Mapping[str, Any]:
180
+ common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
181
+ tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
182
+ )
183
+
184
+ # We need to order the inputs in the way they appear in the forward()
185
+ ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})
186
+
187
+ # Need to add the past_keys
188
+ if self.use_past:
189
+ if not is_torch_available():
190
+ raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
191
+ else:
192
+ import torch
193
+
194
+ batch, seqlen = common_inputs["input_ids"].shape
195
+ # Not using the same length for past_key_values
196
+ past_key_values_length = seqlen + 2
197
+ past_shape = (
198
+ batch,
199
+ self.num_attention_heads,
200
+ past_key_values_length,
201
+ self._config.hidden_size // self.num_attention_heads,
202
+ )
203
+ ordered_inputs["past_key_values"] = [
204
+ (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
205
+ ]
206
+
207
+ ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
208
+ if self.use_past:
209
+ mask_dtype = ordered_inputs["attention_mask"].dtype
210
+ ordered_inputs["attention_mask"] = torch.cat(
211
+ [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
212
+ )
213
+
214
+ return ordered_inputs
215
+
216
+ @property
217
+ def default_onnx_opset(self) -> int:
218
+ return 13
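
In `generate_dummy_inputs` above, each layer's past key/value tensor is shaped `(batch, n_head, past_seq_len, n_embd // n_head)` and the attention mask is extended to cover the past positions. A small sketch that only reproduces that shape arithmetic with the GPT-J defaults (it does not run an actual ONNX export):

```python
import torch

# Default GPT-J dimensions from the GPTJConfig defaults above.
n_embd, n_head, n_layer = 4096, 16, 28
batch, seqlen = 2, 5

# generate_dummy_inputs uses seqlen + 2 as the (arbitrary) past length.
past_key_values_length = seqlen + 2
past_shape = (batch, n_head, past_key_values_length, n_embd // n_head)

past_key_values = [
    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(n_layer)
]
# The mask covers the current tokens plus the past positions.
attention_mask = torch.cat(
    [torch.ones(batch, seqlen, dtype=torch.int64),
     torch.ones(batch, past_key_values_length, dtype=torch.int64)],
    dim=1,
)

print(past_shape)            # (2, 16, 7, 256)
print(attention_mask.shape)  # torch.Size([2, 12])
```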
venv/lib/python3.10/site-packages/transformers/models/gptj/modeling_flax_gptj.py ADDED
@@ -0,0 +1,718 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The EleutherAI and The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ from functools import partial
17
+ from typing import Optional, Tuple
18
+
19
+ import flax.linen as nn
20
+ import jax
21
+ import jax.numpy as jnp
22
+ import numpy as np
23
+ from flax.core.frozen_dict import FrozenDict, freeze, unfreeze
24
+ from flax.linen import combine_masks, make_causal_mask
25
+ from flax.linen.attention import dot_product_attention_weights
26
+ from flax.traverse_util import flatten_dict, unflatten_dict
27
+ from jax import lax
28
+
29
+ from ...modeling_flax_outputs import FlaxBaseModelOutput, FlaxCausalLMOutput
30
+ from ...modeling_flax_utils import ACT2FN, FlaxPreTrainedModel, append_call_sample_docstring
31
+ from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging
32
+ from .configuration_gptj import GPTJConfig
33
+
34
+
35
+ logger = logging.get_logger(__name__)
36
+
37
+ _CHECKPOINT_FOR_DOC = "gptj"
38
+ _CONFIG_FOR_DOC = "GPTJConfig"
39
+
40
+
41
+ GPTJ_START_DOCSTRING = r"""
42
+
43
+ This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the
44
+ library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
45
+ etc.)
46
+
47
+ This model is also a Flax Linen
48
+ [flax.nn.Module](https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html) subclass. Use it as a
49
+ regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior.
50
+
51
+ Finally, this model supports inherent JAX features such as:
52
+
53
+ - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit)
54
+ - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation)
55
+ - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap)
56
+ - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap)
57
+
58
+ Parameters:
59
+ config ([`GPTJConfig`]): Model configuration class with all the parameters of the model.
60
+ Initializing with a config file does not load the weights associated with the model, only the
61
+ configuration. Check out the [`~FlaxPreTrainedModel.from_pretrained`] method to load the model weights.
62
+ dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`):
63
+ The data type of the computation. Can be one of `jax.numpy.float32`, `jax.numpy.float16` (on GPUs) and
64
+ `jax.numpy.bfloat16` (on TPUs).
65
+
66
+ This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If
67
+ specified all the computation will be performed with the given `dtype`.
68
+
69
+ **Note that this only specifies the dtype of the computation and does not influence the dtype of model
70
+ parameters.**
71
+
72
+ If you wish to change the dtype of the model parameters, see [`~FlaxPreTrainedModel.to_fp16`] and
73
+ [`~FlaxPreTrainedModel.to_bf16`].
74
+ """
75
+
76
+ GPTJ_INPUTS_DOCSTRING = r"""
77
+ Args:
78
+ input_ids (`numpy.ndarray` of shape `(batch_size, input_ids_length)`):
79
+ `input_ids_length` = `sequence_length`. Indices of input sequence tokens in the vocabulary.
80
+
81
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
82
+ [`PreTrainedTokenizer.__call__`] for details.
83
+
84
+ [What are input IDs?](../glossary#input-ids)
85
+ attention_mask (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
86
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
87
+
88
+ - 1 for tokens that are **not masked**,
89
+ - 0 for tokens that are **masked**.
90
+
91
+ [What are attention masks?](../glossary#attention-mask)
92
+ position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
93
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
94
+ config.max_position_embeddings - 1]`.
95
+ past_key_values (`Dict[str, np.ndarray]`, *optional*, returned by `init_cache` or when passing previous `past_key_values`):
96
+ Dictionary of pre-computed hidden-states (key and values in the attention blocks) that can be used for fast
97
+ auto-regressive decoding. Pre-computed key and value hidden-states are of shape *[batch_size, max_length]*.
98
+ output_attentions (`bool`, *optional*):
99
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
100
+ tensors for more detail.
101
+ output_hidden_states (`bool`, *optional*):
102
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
103
+ more detail.
104
+ return_dict (`bool`, *optional*):
105
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
106
+ """
107
+
108
+
109
+ def create_sinusoidal_positions(num_pos, dim):
110
+ inv_freq = 1.0 / (10000 ** (np.arange(0, dim, 2) / dim))
111
+ sinusoid_inp = np.einsum("i , j -> i j", np.arange(num_pos), inv_freq).astype("float32")
112
+ sin, cos = np.sin(sinusoid_inp), np.cos(sinusoid_inp)
113
+
114
+ sentinel = dim // 2 + dim % 2
115
+ out = np.zeros((num_pos, dim))
116
+ out[:, 0:sentinel] = sin
117
+ out[:, sentinel:] = cos
118
+
119
+ return jnp.array(out)
120
+
121
+
122
+ def rotate_every_two(tensor):
123
+ rotate_half_tensor = jnp.stack((-tensor[:, :, :, 1::2], tensor[:, :, :, ::2]), axis=-1)
124
+ rotate_half_tensor = rotate_half_tensor.reshape(rotate_half_tensor.shape[:-2] + (-1,))
125
+ return rotate_half_tensor
126
+
127
+
128
+ def apply_rotary_pos_emb(tensor, sincos):
129
+ sin_pos, cos_pos = sincos
130
+ sin_pos = sin_pos[:, :, None, :].repeat(2, 3)
131
+ cos_pos = cos_pos[:, :, None, :].repeat(2, 3)
132
+ return (tensor * cos_pos) + (rotate_every_two(tensor) * sin_pos)
133
+
134
+
135
+ class FlaxGPTJAttention(nn.Module):
136
+ config: GPTJConfig
137
+ dtype: jnp.dtype = jnp.float32
138
+ causal: bool = True
139
+ is_cross_attention: bool = False
140
+
141
+ def setup(self):
142
+ config = self.config
143
+ self.embed_dim = config.hidden_size
144
+ self.num_heads = config.num_attention_heads
145
+ self.head_dim = self.embed_dim // self.num_heads
146
+
147
+ self.rotary_dim = config.rotary_dim
148
+
149
+ dense = partial(
150
+ nn.Dense,
151
+ self.embed_dim,
152
+ use_bias=False,
153
+ dtype=self.dtype,
154
+ kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
155
+ )
156
+
157
+ self.q_proj, self.k_proj, self.v_proj = dense(), dense(), dense()
158
+ self.out_proj = dense()
159
+
160
+ self.resid_dropout = nn.Dropout(rate=config.resid_pdrop)
161
+
162
+ self.causal_mask = make_causal_mask(jnp.ones((1, config.max_position_embeddings), dtype="bool"), dtype="bool")
163
+
164
+ pos_embd_dim = self.rotary_dim or self.embed_dim
165
+ self.embed_positions = create_sinusoidal_positions(config.max_position_embeddings, pos_embd_dim)
166
+
167
+ def _split_heads(self, hidden_states):
168
+ return hidden_states.reshape(hidden_states.shape[:2] + (self.num_heads, self.head_dim))
169
+
170
+ def _merge_heads(self, hidden_states):
171
+ return hidden_states.reshape(hidden_states.shape[:2] + (self.embed_dim,))
172
+
173
+ @nn.compact
174
+ def _concatenate_to_cache(self, key, value, query, attention_mask):
175
+ """
176
+ This function takes projected key, value states from a single input token and concatenates the states to cached
177
+ states from previous steps. This function is slighly adapted from the official Flax repository:
178
+ https://github.com/google/flax/blob/491ce18759622506588784b4fca0e4bf05f8c8cd/flax/linen/attention.py#L252
179
+ """
180
+ # detect if we're initializing by absence of existing cache data.
181
+ is_initialized = self.has_variable("cache", "cached_key")
182
+ cached_key = self.variable("cache", "cached_key", jnp.zeros, key.shape, key.dtype)
183
+ cached_value = self.variable("cache", "cached_value", jnp.zeros, value.shape, value.dtype)
184
+ cache_index = self.variable("cache", "cache_index", lambda: jnp.array(0, dtype=jnp.int32))
185
+
186
+ if is_initialized:
187
+ *batch_dims, max_length, num_heads, depth_per_head = cached_key.value.shape
188
+ # update key, value caches with our new 1d spatial slices
189
+ cur_index = cache_index.value
190
+ indices = (0,) * len(batch_dims) + (cur_index, 0, 0)
191
+ key = lax.dynamic_update_slice(cached_key.value, key, indices)
192
+ value = lax.dynamic_update_slice(cached_value.value, value, indices)
193
+ cached_key.value = key
194
+ cached_value.value = value
195
+ num_updated_cache_vectors = query.shape[1]
196
+ cache_index.value = cache_index.value + num_updated_cache_vectors
197
+ # causal mask for cached decoder self-attention: our single query position should only attend to those key
198
+ # positions that have already been generated and cached, not the remaining zero elements.
199
+ pad_mask = jnp.broadcast_to(
200
+ jnp.arange(max_length) < cur_index + num_updated_cache_vectors,
201
+ tuple(batch_dims) + (1, num_updated_cache_vectors, max_length),
202
+ )
203
+ attention_mask = combine_masks(pad_mask, attention_mask)
204
+ return key, value, attention_mask
205
+
206
+ def __call__(
207
+ self,
208
+ hidden_states,
209
+ attention_mask,
210
+ position_ids,
211
+ deterministic: bool = True,
212
+ init_cache: bool = False,
213
+ output_attentions: bool = False,
214
+ ):
215
+ query = self.q_proj(hidden_states)
216
+ key = self.k_proj(hidden_states)
217
+ value = self.v_proj(hidden_states)
218
+
219
+ query = self._split_heads(query)
220
+ key = self._split_heads(key)
221
+ value = self._split_heads(value)
222
+
223
+ sincos = jnp.take(self.embed_positions, position_ids, axis=0)
224
+ sincos = jnp.split(sincos, 2, axis=-1)
225
+ if self.rotary_dim is not None:
226
+ k_rot = key[:, :, :, : self.rotary_dim]
227
+ k_pass = key[:, :, :, self.rotary_dim :]
228
+
229
+ q_rot = query[:, :, :, : self.rotary_dim]
230
+ q_pass = query[:, :, :, self.rotary_dim :]
231
+
232
+ k_rot = apply_rotary_pos_emb(k_rot, sincos)
233
+ q_rot = apply_rotary_pos_emb(q_rot, sincos)
234
+
235
+ key = jnp.concatenate([k_rot, k_pass], axis=-1)
236
+ query = jnp.concatenate([q_rot, q_pass], axis=-1)
237
+ else:
238
+ key = apply_rotary_pos_emb(key, sincos)
239
+ query = apply_rotary_pos_emb(query, sincos)
240
+
241
+ query_length, key_length = query.shape[1], key.shape[1]
242
+
243
+ if self.has_variable("cache", "cached_key"):
244
+ mask_shift = self.variables["cache"]["cache_index"]
245
+ max_decoder_length = self.variables["cache"]["cached_key"].shape[1]
246
+ causal_mask = lax.dynamic_slice(
247
+ self.causal_mask, (0, 0, mask_shift, 0), (1, 1, query_length, max_decoder_length)
248
+ )
249
+ else:
250
+ causal_mask = self.causal_mask[:, :, :query_length, :key_length]
251
+
252
+ batch_size = hidden_states.shape[0]
253
+ causal_mask = jnp.broadcast_to(causal_mask, (batch_size,) + causal_mask.shape[1:])
254
+
255
+ attention_mask = jnp.broadcast_to(jnp.expand_dims(attention_mask, axis=(-3, -2)), causal_mask.shape)
256
+ attention_mask = combine_masks(attention_mask, causal_mask)
257
+
258
+ dropout_rng = None
259
+ if not deterministic and self.config.attn_pdrop > 0.0:
260
+ dropout_rng = self.make_rng("dropout")
261
+
262
+ # During fast autoregressive decoding, we feed one position at a time,
263
+ # and cache the keys and values step by step.
264
+ if self.has_variable("cache", "cached_key") or init_cache:
265
+ key, value, attention_mask = self._concatenate_to_cache(key, value, query, attention_mask)
266
+
267
+ # transform boolean mask into float mask
268
+ attention_bias = lax.select(
269
+ attention_mask > 0,
270
+ jnp.full(attention_mask.shape, 0.0).astype(self.dtype),
271
+ jnp.full(attention_mask.shape, jnp.finfo(self.dtype).min).astype(self.dtype),
272
+ )
273
+
274
+ # usual dot product attention
275
+ attn_weights = dot_product_attention_weights(
276
+ query,
277
+ key,
278
+ bias=attention_bias,
279
+ dropout_rng=dropout_rng,
280
+ dropout_rate=self.config.attn_pdrop,
281
+ deterministic=deterministic,
282
+ dtype=self.dtype,
283
+ precision=None,
284
+ )
285
+
286
+ attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value)
287
+ attn_output = self._merge_heads(attn_output)
288
+ attn_output = self.out_proj(attn_output)
289
+ attn_output = self.resid_dropout(attn_output, deterministic=deterministic)
290
+
291
+ outputs = (attn_output, attn_weights) if output_attentions else (attn_output,)
292
+ return outputs
293
+
294
+
295
+ class FlaxGPTJMLP(nn.Module):
296
+ config: GPTJConfig
297
+ intermediate_size: int
298
+ dtype: jnp.dtype = jnp.float32
299
+
300
+ def setup(self):
301
+ embed_dim = self.config.hidden_size
302
+ kernel_init = jax.nn.initializers.normal(self.config.initializer_range)
303
+
304
+ self.fc_in = nn.Dense(self.intermediate_size, dtype=self.dtype, kernel_init=kernel_init)
305
+ self.fc_out = nn.Dense(embed_dim, dtype=self.dtype, kernel_init=kernel_init)
306
+
307
+ self.act = ACT2FN[self.config.activation_function]
308
+ self.dropout = nn.Dropout(rate=self.config.resid_pdrop)
309
+
310
+ def __call__(self, hidden_states, deterministic: bool = True):
311
+ hidden_states = self.fc_in(hidden_states)
312
+ hidden_states = self.act(hidden_states)
313
+ hidden_states = self.fc_out(hidden_states)
314
+ hidden_states = self.dropout(hidden_states, deterministic=deterministic)
315
+ return hidden_states
316
+
317
+
318
+ class FlaxGPTJBlock(nn.Module):
319
+ config: GPTJConfig
320
+ dtype: jnp.dtype = jnp.float32
321
+
322
+ def setup(self):
323
+ hidden_size = self.config.hidden_size
324
+ inner_dim = self.config.n_inner if self.config.n_inner is not None else 4 * hidden_size
325
+
326
+ self.ln_1 = nn.LayerNorm(epsilon=self.config.layer_norm_epsilon, dtype=self.dtype)
327
+ self.attn = FlaxGPTJAttention(self.config, dtype=self.dtype)
328
+
329
+ self.mlp = FlaxGPTJMLP(self.config, inner_dim, dtype=self.dtype)
330
+
331
+ def __call__(
332
+ self,
333
+ hidden_states,
334
+ attention_mask=None,
335
+ position_ids=None,
336
+ deterministic: bool = True,
337
+ init_cache: bool = False,
338
+ output_attentions: bool = False,
339
+ ):
340
+ residual = hidden_states
341
+ hidden_states = self.ln_1(hidden_states)
342
+ attn_outputs = self.attn(
343
+ hidden_states,
344
+ attention_mask=attention_mask,
345
+ position_ids=position_ids,
346
+ deterministic=deterministic,
347
+ init_cache=init_cache,
348
+ output_attentions=output_attentions,
349
+ )
350
+ attn_output = attn_outputs[0]
351
+
352
+ feed_forward_hidden_states = self.mlp(hidden_states, deterministic=deterministic)
353
+ # residual connection
354
+ hidden_states = attn_output + feed_forward_hidden_states + residual
355
+
356
+ return (hidden_states,) + attn_outputs[1:]
357
+
358
+
359
+ class FlaxGPTJPreTrainedModel(FlaxPreTrainedModel):
360
+ """
361
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
362
+ models.
363
+ """
364
+
365
+ config_class = GPTJConfig
366
+ base_model_prefix = "transformer"
367
+ module_class: nn.Module = None
368
+
369
+ def __init__(
370
+ self,
371
+ config: GPTJConfig,
372
+ input_shape: Tuple = (1, 1),
373
+ seed: int = 0,
374
+ dtype: jnp.dtype = jnp.float32,
375
+ _do_init: bool = True,
376
+ **kwargs,
377
+ ):
378
+ module = self.module_class(config=config, dtype=dtype, **kwargs)
379
+ super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)
380
+
381
+ def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
382
+ # init input tensors
383
+ input_ids = jnp.zeros(input_shape, dtype="i4")
384
+ attention_mask = jnp.ones_like(input_ids)
385
+ position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_shape)
386
+ params_rng, dropout_rng = jax.random.split(rng)
387
+ rngs = {"params": params_rng, "dropout": dropout_rng}
388
+
389
+ if self.config.add_cross_attention:
390
+ encoder_hidden_states = jnp.zeros(input_shape + (self.config.n_embd,))
391
+ encoder_attention_mask = attention_mask
392
+ module_init_outputs = self.module.init(
393
+ rngs,
394
+ input_ids,
395
+ attention_mask,
396
+ position_ids,
397
+ encoder_hidden_states,
398
+ encoder_attention_mask,
399
+ return_dict=False,
400
+ )
401
+ else:
402
+ module_init_outputs = self.module.init(rngs, input_ids, attention_mask, position_ids, return_dict=False)
403
+
404
+ random_params = module_init_outputs["params"]
405
+
406
+ if params is not None:
407
+ random_params = flatten_dict(unfreeze(random_params))
408
+ params = flatten_dict(unfreeze(params))
409
+ for missing_key in self._missing_keys:
410
+ params[missing_key] = random_params[missing_key]
411
+ self._missing_keys = set()
412
+ return freeze(unflatten_dict(params))
413
+ else:
414
+ return random_params
415
+
416
+ def init_cache(self, batch_size, max_length):
417
+ r"""
418
+ Args:
419
+ batch_size (`int`):
420
+ batch_size used for fast auto-regressive decoding. Defines the batch size of the initialized cache.
421
+ max_length (`int`):
422
+ maximum possible length for auto-regressive decoding. Defines the sequence length of the initialized
423
+ cache.
424
+ """
425
+ # init input variables to retrieve cache
426
+ input_ids = jnp.ones((batch_size, max_length))
427
+ attention_mask = jnp.ones_like(input_ids)
428
+ position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape)
429
+
430
+ init_variables = self.module.init(
431
+ jax.random.PRNGKey(0), input_ids, attention_mask, position_ids, return_dict=False, init_cache=True
432
+ )
433
+ return init_variables["cache"]
434
+
435
+ @add_start_docstrings_to_model_forward(GPTJ_INPUTS_DOCSTRING)
436
+ def __call__(
437
+ self,
438
+ input_ids,
439
+ attention_mask=None,
440
+ position_ids=None,
441
+ params: dict = None,
442
+ past_key_values: dict = None,
443
+ dropout_rng: jax.random.PRNGKey = None,
444
+ train: bool = False,
445
+ output_attentions: Optional[bool] = None,
446
+ output_hidden_states: Optional[bool] = None,
447
+ return_dict: Optional[bool] = None,
448
+ ):
449
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
450
+ output_hidden_states = (
451
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
452
+ )
453
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
454
+
455
+ batch_size, sequence_length = input_ids.shape
456
+
457
+ if position_ids is None:
458
+ if past_key_values is not None:
459
+ raise ValueError("Make sure to provide `position_ids` when passing `past_key_values`.")
460
+
461
+ position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length))
462
+
463
+ if attention_mask is None:
464
+ attention_mask = jnp.ones((batch_size, sequence_length))
465
+
466
+ # Handle any PRNG if needed
467
+ rngs = {}
468
+ if dropout_rng is not None:
469
+ rngs["dropout"] = dropout_rng
470
+
471
+ inputs = {"params": params or self.params}
472
+
473
+ # if past_key_values are passed then cache is already initialized a private flag init_cache has to be passed down to ensure cache is used. It has to be made sure that cache is marked as mutable so that it can be changed by FlaxGPTJAttention module
474
+ if past_key_values:
475
+ inputs["cache"] = past_key_values
476
+ mutable = ["cache"]
477
+ else:
478
+ mutable = False
479
+
480
+ outputs = self.module.apply(
481
+ inputs,
482
+ jnp.array(input_ids, dtype="i4"),
483
+ jnp.array(attention_mask, dtype="i4"),
484
+ jnp.array(position_ids, dtype="i4"),
485
+ not train,
486
+ False,
487
+ output_attentions,
488
+ output_hidden_states,
489
+ return_dict,
490
+ rngs=rngs,
491
+ mutable=mutable,
492
+ )
493
+
494
+ # add updated cache to model output
495
+ if past_key_values is not None and return_dict:
496
+ outputs, past_key_values = outputs
497
+ outputs["past_key_values"] = unfreeze(past_key_values["cache"])
498
+ return outputs
499
+ elif past_key_values is not None and not return_dict:
500
+ outputs, past_key_values = outputs
501
+ outputs = outputs[:1] + (unfreeze(past_key_values["cache"]),) + outputs[1:]
502
+
503
+ return outputs
504
+
505
+
506
+ class FlaxGPTJBlockCollection(nn.Module):
507
+ config: GPTJConfig
508
+ dtype: jnp.dtype = jnp.float32
509
+
510
+ def setup(self):
511
+ self.blocks = [
512
+ FlaxGPTJBlock(self.config, name=str(i), dtype=self.dtype) for i in range(self.config.num_hidden_layers)
513
+ ]
514
+
515
+ def __call__(
516
+ self,
517
+ hidden_states,
518
+ attention_mask=None,
519
+ position_ids=None,
520
+ deterministic: bool = True,
521
+ init_cache: bool = False,
522
+ output_attentions: bool = False,
523
+ output_hidden_states: bool = False,
524
+ return_dict: bool = True,
525
+ ):
526
+ all_attentions = () if output_attentions else None
527
+ all_hidden_states = () if output_hidden_states else None
528
+
529
+ for block in self.blocks:
530
+ if output_hidden_states:
531
+ all_hidden_states += (hidden_states,)
532
+
533
+ layer_outputs = block(
534
+ hidden_states,
535
+ attention_mask,
536
+ position_ids=position_ids,
537
+ deterministic=deterministic,
538
+ init_cache=init_cache,
539
+ output_attentions=output_attentions,
540
+ )
541
+ hidden_states = layer_outputs[0]
542
+
543
+ if output_attentions:
544
+ all_attentions += (layer_outputs[1],)
545
+
546
+ # this contains possible `None` values - `FlaxGPTJModule` will filter them out
547
+ outputs = (hidden_states, all_hidden_states, all_attentions)
548
+
549
+ return outputs
550
+
551
+
552
+ class FlaxGPTJModule(nn.Module):
553
+ config: GPTJConfig
554
+ dtype: jnp.dtype = jnp.float32
555
+
556
+ def setup(self):
557
+ self.embed_dim = self.config.hidden_size
558
+
559
+ self.wte = nn.Embed(
560
+ self.config.vocab_size,
561
+ self.config.hidden_size,
562
+ embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
563
+ )
564
+ self.dropout = nn.Dropout(rate=self.config.embd_pdrop)
565
+ self.h = FlaxGPTJBlockCollection(self.config, dtype=self.dtype)
566
+ self.ln_f = nn.LayerNorm(epsilon=self.config.layer_norm_epsilon, dtype=self.dtype)
567
+
568
+ def __call__(
569
+ self,
570
+ input_ids,
571
+ attention_mask,
572
+ position_ids,
573
+ deterministic=True,
574
+ init_cache: bool = False,
575
+ output_attentions: bool = False,
576
+ output_hidden_states: bool = False,
577
+ return_dict: bool = True,
578
+ ):
579
+ input_embeds = self.wte(input_ids.astype("i4"))
580
+
581
+ hidden_states = self.dropout(input_embeds, deterministic=deterministic)
582
+
583
+ outputs = self.h(
584
+ hidden_states,
585
+ attention_mask,
586
+ position_ids=position_ids,
587
+ deterministic=deterministic,
588
+ init_cache=init_cache,
589
+ output_attentions=output_attentions,
590
+ output_hidden_states=output_hidden_states,
591
+ return_dict=return_dict,
592
+ )
593
+
594
+ hidden_states = outputs[0]
595
+ hidden_states = self.ln_f(hidden_states)
596
+
597
+ if output_hidden_states:
598
+ all_hidden_states = outputs[1] + (hidden_states,)
599
+ outputs = (hidden_states, all_hidden_states) + outputs[2:]
600
+ else:
601
+ outputs = (hidden_states,) + outputs[1:]
602
+
603
+ if not return_dict:
604
+ return tuple(v for v in outputs if v is not None)
605
+
606
+ return FlaxBaseModelOutput(
607
+ last_hidden_state=hidden_states,
608
+ hidden_states=outputs[1],
609
+ attentions=outputs[-1],
610
+ )
611
+
612
+
613
+ @add_start_docstrings(
614
+ "The bare GPTJ Model transformer outputting raw hidden-states without any specific head on top.",
615
+ GPTJ_START_DOCSTRING,
616
+ )
617
+ class FlaxGPTJModel(FlaxGPTJPreTrainedModel):
618
+ module_class = FlaxGPTJModule
619
+
620
+
621
+ append_call_sample_docstring(
622
+ FlaxGPTJModel,
623
+ _CHECKPOINT_FOR_DOC,
624
+ FlaxCausalLMOutput,
625
+ _CONFIG_FOR_DOC,
626
+ )
627
+
628
+
629
+ class FlaxGPTJForCausalLMModule(nn.Module):
630
+ config: GPTJConfig
631
+ dtype: jnp.dtype = jnp.float32
632
+
633
+ def setup(self):
634
+ self.transformer = FlaxGPTJModule(self.config, dtype=self.dtype)
635
+ self.lm_head = nn.Dense(
636
+ self.config.vocab_size,
637
+ dtype=self.dtype,
638
+ kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
639
+ )
640
+
641
+ def __call__(
642
+ self,
643
+ input_ids,
644
+ attention_mask,
645
+ position_ids,
646
+ deterministic: bool = True,
647
+ init_cache: bool = False,
648
+ output_attentions: bool = False,
649
+ output_hidden_states: bool = False,
650
+ return_dict: bool = True,
651
+ ):
652
+ outputs = self.transformer(
653
+ input_ids,
654
+ attention_mask,
655
+ position_ids,
656
+ deterministic=deterministic,
657
+ init_cache=init_cache,
658
+ output_attentions=output_attentions,
659
+ output_hidden_states=output_hidden_states,
660
+ return_dict=return_dict,
661
+ )
662
+
663
+ hidden_states = outputs[0]
664
+
665
+ if self.config.tie_word_embeddings:
666
+ shared_kernel = self.transformer.variables["params"]["wte"]["embedding"].T
667
+ lm_logits = self.lm_head.apply({"params": {"kernel": shared_kernel}}, hidden_states)
668
+ else:
669
+ lm_logits = self.lm_head(hidden_states)
670
+
671
+ if not return_dict:
672
+ return (lm_logits,) + outputs[1:]
673
+
674
+ return FlaxCausalLMOutput(logits=lm_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
675
+
676
+
677
+ @add_start_docstrings(
678
+ """
679
+ The GPTJ Model transformer with a language modeling head on top.
680
+ """,
681
+ GPTJ_START_DOCSTRING,
682
+ )
683
+ class FlaxGPTJForCausalLM(FlaxGPTJPreTrainedModel):
684
+ module_class = FlaxGPTJForCausalLMModule
685
+
686
+ def prepare_inputs_for_generation(self, input_ids, max_length, attention_mask: Optional[jax.Array] = None):
687
+ # initializing the cache
688
+ batch_size, seq_length = input_ids.shape
689
+
690
+ past_key_values = self.init_cache(batch_size, max_length)
691
+ # Note that usually one would have to put 0's in the attention_mask for x > input_ids.shape[-1] and x < cache_length.
692
+ # But since GPTJ uses a causal mask, those positions are masked anyways.
693
+ # Thus we can create a single static attention_mask here, which is more efficient for compilation
694
+ extended_attention_mask = jnp.ones((batch_size, max_length), dtype="i4")
695
+ if attention_mask is not None:
696
+ position_ids = attention_mask.cumsum(axis=-1) - 1
697
+ extended_attention_mask = lax.dynamic_update_slice(extended_attention_mask, attention_mask, (0, 0))
698
+ else:
699
+ position_ids = jnp.broadcast_to(jnp.arange(seq_length, dtype="i4")[None, :], (batch_size, seq_length))
700
+
701
+ return {
702
+ "past_key_values": past_key_values,
703
+ "attention_mask": extended_attention_mask,
704
+ "position_ids": position_ids,
705
+ }
706
+
707
+ def update_inputs_for_generation(self, model_outputs, model_kwargs):
708
+ model_kwargs["past_key_values"] = model_outputs.past_key_values
709
+ model_kwargs["position_ids"] = model_kwargs["position_ids"][:, -1:] + 1
710
+ return model_kwargs
711
+
712
+
713
+ append_call_sample_docstring(
714
+ FlaxGPTJForCausalLM,
715
+ _CHECKPOINT_FOR_DOC,
716
+ FlaxCausalLMOutput,
717
+ _CONFIG_FOR_DOC,
718
+ )
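# A minimal sketch of what `FlaxGPTJForCausalLM.prepare_inputs_for_generation` (above) computes
# for a left-padded prompt. The concrete mask and `max_length` values below are illustrative
# assumptions, not values taken from the file.
import jax.numpy as jnp
from jax import lax

attention_mask = jnp.array([[0, 1, 1]], dtype="i4")  # batch of 1, left-padded prompt of length 3
max_length = 6
extended_attention_mask = jnp.ones((1, max_length), dtype="i4")
extended_attention_mask = lax.dynamic_update_slice(extended_attention_mask, attention_mask, (0, 0))
position_ids = attention_mask.cumsum(axis=-1) - 1
# extended_attention_mask -> [[0, 1, 1, 1, 1, 1]] (static shape; the padded tail is left as 1s since
# the causal mask hides those positions anyway), position_ids -> [[-1, 0, 1]]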
venv/lib/python3.10/site-packages/transformers/models/gptj/modeling_gptj.py ADDED
@@ -0,0 +1,1427 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The EleutherAI and HuggingFace Teams. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch GPT-J model."""
16
+
17
+ import warnings
18
+ from typing import Optional, Tuple, Union
19
+
20
+ import torch
21
+ import torch.fx
22
+ import torch.nn.functional as F
23
+ import torch.utils.checkpoint
24
+ from torch import nn
25
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
26
+
27
+ from ...activations import ACT2FN
28
+ from ...modeling_outputs import (
29
+ BaseModelOutputWithPast,
30
+ CausalLMOutputWithPast,
31
+ QuestionAnsweringModelOutput,
32
+ SequenceClassifierOutputWithPast,
33
+ )
34
+ from ...modeling_utils import PreTrainedModel
35
+ from ...utils import (
36
+ add_code_sample_docstrings,
37
+ add_start_docstrings,
38
+ add_start_docstrings_to_model_forward,
39
+ is_flash_attn_2_available,
40
+ is_flash_attn_greater_or_equal_2_10,
41
+ is_torch_fx_proxy,
42
+ logging,
43
+ )
44
+ from ...utils.model_parallel_utils import assert_device_map, get_device_map
45
+ from .configuration_gptj import GPTJConfig
46
+
47
+
48
+ if is_flash_attn_2_available():
49
+ from flash_attn import flash_attn_func, flash_attn_varlen_func
50
+ from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input # noqa
51
+
52
+
53
+ logger = logging.get_logger(__name__)
54
+
55
+ _CHECKPOINT_FOR_DOC = "hf-internal-testing/tiny-random-gptj"
56
+ _REAL_CHECKPOINT_FOR_DOC = "EleutherAI/gpt-j-6B"
57
+ _CONFIG_FOR_DOC = "GPTJConfig"
58
+
59
+
60
+ from ..deprecated._archive_maps import GPTJ_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
61
+
62
+
63
+ # Copied from transformers.models.llama.modeling_llama._get_unpad_data
64
+ def _get_unpad_data(attention_mask):
65
+ seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
66
+ indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
67
+ max_seqlen_in_batch = seqlens_in_batch.max().item()
68
+ cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
69
+ return (
70
+ indices,
71
+ cu_seqlens,
72
+ max_seqlen_in_batch,
73
+ )
74
+
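# A minimal sketch (with illustrative mask values) of what `_get_unpad_data` above returns for a
# padded batch; it relies only on `torch`, which is already imported in this module.
_example_mask = torch.tensor([[1, 1, 0],
                              [1, 1, 1]])
_indices, _cu_seqlens, _max_seqlen = _get_unpad_data(_example_mask)
# _indices    -> tensor([0, 1, 3, 4, 5])  flattened positions of the non-padding tokens
# _cu_seqlens -> tensor([0, 2, 5])        cumulative sequence lengths passed to flash_attn_varlen_func
# _max_seqlen -> 3                        length of the longest unpadded sequence in the batch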
75
+
76
+ def create_sinusoidal_positions(num_pos: int, dim: int) -> torch.Tensor:
77
+ inv_freq = 1.0 / (10000 ** (torch.arange(0, dim, 2, dtype=torch.int64) / dim))
78
+ sinusoid_inp = torch.einsum("i , j -> i j", torch.arange(num_pos, dtype=torch.int64).float(), inv_freq).float()
79
+ return torch.cat((torch.sin(sinusoid_inp), torch.cos(sinusoid_inp)), dim=1)
80
+
81
+
82
+ @torch.fx.wrap
83
+ def get_embed_positions(embed_positions, position_ids):
84
+ return embed_positions.to(position_ids.device).repeat(position_ids.shape[0], 1, 1)
85
+
86
+
87
+ def rotate_every_two(x: torch.Tensor) -> torch.Tensor:
88
+ x1 = x[:, :, :, ::2]
89
+ x2 = x[:, :, :, 1::2]
90
+ x = torch.stack((-x2, x1), dim=-1)
91
+ return x.flatten(-2) # in einsum notation: rearrange(x, '... d j -> ... (d j)')
92
+
93
+
94
+ def apply_rotary_pos_emb(tensor: torch.Tensor, sin: torch.Tensor, cos: torch.Tensor) -> torch.Tensor:
95
+ sin = torch.repeat_interleave(sin[:, :, None, :], 2, 3)
96
+ cos = torch.repeat_interleave(cos[:, :, None, :], 2, 3)
97
+ return (tensor * cos) + (rotate_every_two(tensor) * sin)
98
+
99
+
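# A minimal sketch (shapes are illustrative assumptions) tying the helpers above together:
# `create_sinusoidal_positions` builds the sin/cos table and `apply_rotary_pos_emb` rotates
# consecutive feature pairs of the query/key slice that falls inside `rotary_dim`.
_batch, _seq, _heads, _rotary_dim = 1, 4, 2, 8
_table = create_sinusoidal_positions(_seq, _rotary_dim)            # [seq, rotary_dim]
_sincos = _table[None].repeat(_batch, 1, 1)                        # [batch, seq, rotary_dim]
_sin, _cos = torch.split(_sincos, _sincos.shape[-1] // 2, dim=-1)  # each [batch, seq, rotary_dim // 2]
_q_rot = torch.randn(_batch, _seq, _heads, _rotary_dim)            # query slice before rotation
_q_rot = apply_rotary_pos_emb(_q_rot, _sin, _cos)                  # same shape, position now encoded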
100
+ class GPTJAttention(nn.Module):
101
+ def __init__(self, config):
102
+ super().__init__()
103
+ self.config = config
104
+ max_positions = config.max_position_embeddings
105
+ self.register_buffer(
106
+ "bias",
107
+ torch.tril(torch.ones((max_positions, max_positions), dtype=torch.bool)).view(
108
+ 1, 1, max_positions, max_positions
109
+ ),
110
+ persistent=False,
111
+ )
112
+ self.register_buffer("masked_bias", torch.tensor(-1e9), persistent=False)
113
+
114
+ self.attn_dropout = nn.Dropout(config.attn_pdrop)
115
+ self.resid_dropout = nn.Dropout(config.resid_pdrop)
116
+
117
+ self.is_causal = True
118
+
119
+ self.embed_dim = config.hidden_size
120
+ self.num_attention_heads = config.num_attention_heads
121
+ self.head_dim = self.embed_dim // self.num_attention_heads
122
+ if self.head_dim * self.num_attention_heads != self.embed_dim:
123
+ raise ValueError(
124
+ f"embed_dim must be divisible by num_attention_heads (got `embed_dim`: {self.embed_dim} and"
125
+ f" `num_attention_heads`: {self.num_attention_heads})."
126
+ )
127
+ self.scale_attn = torch.sqrt(torch.tensor(self.head_dim, dtype=torch.float32)).to(torch.get_default_dtype())
128
+
129
+ self.k_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=False)
130
+ self.v_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=False)
131
+ self.q_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=False)
132
+ self.out_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=False)
133
+ self.rotary_dim = config.rotary_dim
134
+ pos_embd_dim = self.rotary_dim or self.embed_dim
135
+ self.embed_positions = create_sinusoidal_positions(max_positions, pos_embd_dim)
136
+
137
+ def _split_heads(self, tensor, num_attention_heads, attn_head_size, rotary):
138
+ """
139
+ Splits hidden dim into attn_head_size and num_attention_heads
140
+ """
141
+ new_shape = tensor.size()[:-1] + (num_attention_heads, attn_head_size)
142
+ tensor = tensor.view(new_shape)
143
+ if rotary:
144
+ return tensor
145
+ if len(tensor.shape) == 5:
146
+ return tensor.permute(0, 1, 3, 2, 4) # (batch, blocks, head, block_length, head_features)
147
+ elif len(tensor.shape) == 4:
148
+ return tensor.permute(0, 2, 1, 3) # (batch, head, seq_length, head_features)
149
+ else:
150
+ raise ValueError(f"Input tensor rank should be one of [4, 5], but is: {len(tensor.shape)}")
151
+
152
+ def _merge_heads(self, tensor, num_attention_heads, attn_head_size):
153
+ """
154
+ Merges attn_head_size dim and num_attn_heads dim into hidden dim
155
+ """
156
+ if len(tensor.shape) == 5:
157
+ tensor = tensor.permute(0, 1, 3, 2, 4).contiguous()
158
+ elif len(tensor.shape) == 4:
159
+ tensor = tensor.permute(0, 2, 1, 3).contiguous()
160
+ else:
161
+ raise ValueError(f"Input tensor rank should be one of [4, 5], but is: {len(tensor.shape)}")
162
+ new_shape = tensor.size()[:-2] + (num_attention_heads * attn_head_size,)
163
+ return tensor.view(new_shape)
164
+
165
+ def _attn(
166
+ self,
167
+ query,
168
+ key,
169
+ value,
170
+ attention_mask=None,
171
+ head_mask=None,
172
+ ):
173
+ # compute causal mask from causal mask buffer
174
+ query_length, key_length = query.size(-2), key.size(-2)
175
+ causal_mask = self.bias[:, :, key_length - query_length : key_length, :key_length]
176
+
177
+ # Keep the attention weights computation in fp32 to avoid overflow issues
178
+ query = query.to(torch.float32)
179
+ key = key.to(torch.float32)
180
+
181
+ attn_weights = torch.matmul(query, key.transpose(-1, -2))
182
+
183
+ mask_value = torch.finfo(attn_weights.dtype).min
184
+ # Need to be a tensor, otherwise we get error: `RuntimeError: expected scalar type float but found double`.
185
+ # Need to be on the same device, otherwise `RuntimeError: ..., x and y to be on the same device`
186
+ mask_value = torch.tensor(mask_value, dtype=attn_weights.dtype).to(attn_weights.device)
187
+ attn_weights = torch.where(causal_mask, attn_weights, mask_value)
188
+
189
+ attn_weights = attn_weights / self.scale_attn
190
+
191
+ if attention_mask is not None:
192
+ # Apply the attention mask
193
+ attn_weights = attn_weights + attention_mask
194
+
195
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1)
196
+ attn_weights = attn_weights.to(value.dtype)
197
+ attn_weights = self.attn_dropout(attn_weights)
198
+
199
+ # Mask heads if we want to
200
+ if head_mask is not None:
201
+ attn_weights = attn_weights * head_mask
202
+
203
+ attn_output = torch.matmul(attn_weights, value)
204
+
205
+ return attn_output, attn_weights
206
+
207
+ def _get_embed_positions(self, position_ids):
208
+ embed_positions = self.embed_positions
209
+ if embed_positions.device != position_ids.device:
210
+ embed_positions = embed_positions.to(position_ids.device)
211
+ self.embed_positions = embed_positions
212
+ return embed_positions.repeat(position_ids.shape[0], 1, 1)
213
+
214
+ def forward(
215
+ self,
216
+ hidden_states: torch.FloatTensor,
217
+ layer_past: Optional[Tuple[torch.Tensor]] = None,
218
+ attention_mask: Optional[torch.FloatTensor] = None,
219
+ position_ids: Optional[torch.LongTensor] = None,
220
+ head_mask: Optional[torch.FloatTensor] = None,
221
+ use_cache: Optional[bool] = False,
222
+ output_attentions: Optional[bool] = False,
223
+ ) -> Union[
224
+ Tuple[torch.Tensor, Tuple[torch.Tensor]],
225
+ Optional[Tuple[torch.Tensor, Tuple[torch.Tensor], Tuple[torch.Tensor, ...]]],
226
+ ]:
227
+ query = self.q_proj(hidden_states)
228
+ key = self.k_proj(hidden_states)
229
+ value = self.v_proj(hidden_states)
230
+
231
+ query = self._split_heads(query, self.num_attention_heads, self.head_dim, True)
232
+ key = self._split_heads(key, self.num_attention_heads, self.head_dim, True)
233
+ value = self._split_heads(value, self.num_attention_heads, self.head_dim, False)
234
+
235
+ if is_torch_fx_proxy(position_ids) or torch.jit.is_tracing():
236
+ # The logic to conditionally copy to GPU could not be traced, so we do this
237
+ # every time in the torch.fx case
238
+ embed_positions = get_embed_positions(self.embed_positions, position_ids)
239
+ else:
240
+ embed_positions = self._get_embed_positions(position_ids)
241
+
242
+ repeated_position_ids = position_ids.unsqueeze(-1).repeat(1, 1, embed_positions.shape[-1])
243
+ sincos = torch.gather(embed_positions, 1, repeated_position_ids)
244
+ sin, cos = torch.split(sincos, sincos.shape[-1] // 2, dim=-1)
245
+
246
+ if self.rotary_dim is not None:
247
+ k_rot = key[:, :, :, : self.rotary_dim]
248
+ k_pass = key[:, :, :, self.rotary_dim :]
249
+
250
+ q_rot = query[:, :, :, : self.rotary_dim]
251
+ q_pass = query[:, :, :, self.rotary_dim :]
252
+
253
+ k_rot = apply_rotary_pos_emb(k_rot, sin, cos)
254
+ q_rot = apply_rotary_pos_emb(q_rot, sin, cos)
255
+
256
+ key = torch.cat([k_rot, k_pass], dim=-1)
257
+ query = torch.cat([q_rot, q_pass], dim=-1)
258
+ else:
259
+ key = apply_rotary_pos_emb(key, sin, cos)
260
+ query = apply_rotary_pos_emb(query, sin, cos)
261
+
262
+ key = key.permute(0, 2, 1, 3)
263
+ query = query.permute(0, 2, 1, 3)
264
+
265
+ if layer_past is not None:
266
+ past_key = layer_past[0]
267
+ past_value = layer_past[1]
268
+ key = torch.cat((past_key, key), dim=-2)
269
+ value = torch.cat((past_value, value), dim=-2)
270
+
271
+ if use_cache is True:
272
+ # Note that this cast is quite ugly, but is not implemented before ROPE as the original codebase keeps the key in float32 all along the computation.
273
+ # Reference: https://github.com/kingoflolz/mesh-transformer-jax/blob/f8315e3003033b23f21d78361b288953064e0e76/mesh_transformer/layers.py#L128
274
+ present = (key.to(hidden_states.dtype), value)
275
+ else:
276
+ present = None
277
+
278
+ # compute self-attention: V x Softmax(QK^T)
279
+ attn_output, attn_weights = self._attn(query, key, value, attention_mask, head_mask)
280
+
281
+ attn_output = self._merge_heads(attn_output, self.num_attention_heads, self.head_dim)
282
+ attn_output = self.out_proj(attn_output)
283
+ attn_output = self.resid_dropout(attn_output)
284
+
285
+ outputs = (attn_output, present)
286
+ if output_attentions:
287
+ outputs += (attn_weights,)
288
+
289
+ return outputs # a, present, (attentions)
290
+
291
+
292
+ class GPTJFlashAttention2(GPTJAttention):
293
+ """
294
+ GPTJ flash attention module. This module inherits from `GPTJAttention`, as the weights of the module stay
295
+ untouched. The only required change would be on the forward pass where it needs to correctly call the public API of
296
+ flash attention and deal with padding tokens in case the input contains any of them.
297
+ """
298
+
299
+ def __init__(self, *args, **kwargs):
300
+ super().__init__(*args, **kwargs)
301
+
302
+ # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1.
303
+ # flash_attn<2.1 generates a top-left aligned causal mask, while what is needed here is bottom-right alignment, which was made the default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0.
304
+ # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left).
305
+ self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()
306
+
307
+ def forward(
308
+ self,
309
+ hidden_states: torch.FloatTensor,
310
+ layer_past: Optional[Tuple[torch.Tensor]] = None,
311
+ attention_mask: Optional[torch.FloatTensor] = None,
312
+ position_ids: Optional[torch.LongTensor] = None,
313
+ head_mask: Optional[torch.FloatTensor] = None,
314
+ use_cache: Optional[bool] = False,
315
+ output_attentions: Optional[bool] = False,
316
+ ) -> Union[
317
+ Tuple[torch.Tensor, Tuple[torch.Tensor]],
318
+ Optional[Tuple[torch.Tensor, Tuple[torch.Tensor], Tuple[torch.Tensor, ...]]],
319
+ ]:
320
+ query = self.q_proj(hidden_states)
321
+ key = self.k_proj(hidden_states)
322
+ value = self.v_proj(hidden_states)
323
+
324
+ query = self._split_heads(query, self.num_attention_heads, self.head_dim, True)
325
+ key = self._split_heads(key, self.num_attention_heads, self.head_dim, True)
326
+ value = self._split_heads(value, self.num_attention_heads, self.head_dim, False)
327
+
328
+ if is_torch_fx_proxy(position_ids) or torch.jit.is_tracing():
329
+ # The logic to conditionally copy to GPU could not be traced, so we do this
330
+ # every time in the torch.fx case
331
+ embed_positions = get_embed_positions(self.embed_positions, position_ids)
332
+ else:
333
+ embed_positions = self._get_embed_positions(position_ids)
334
+
335
+ repeated_position_ids = position_ids.unsqueeze(-1).repeat(1, 1, embed_positions.shape[-1])
336
+ sincos = torch.gather(embed_positions, 1, repeated_position_ids)
337
+ sin, cos = torch.split(sincos, sincos.shape[-1] // 2, dim=-1)
338
+
339
+ if self.rotary_dim is not None:
340
+ k_rot = key[:, :, :, : self.rotary_dim]
341
+ k_pass = key[:, :, :, self.rotary_dim :]
342
+
343
+ q_rot = query[:, :, :, : self.rotary_dim]
344
+ q_pass = query[:, :, :, self.rotary_dim :]
345
+
346
+ k_rot = apply_rotary_pos_emb(k_rot, sin, cos)
347
+ q_rot = apply_rotary_pos_emb(q_rot, sin, cos)
348
+
349
+ key = torch.cat([k_rot, k_pass], dim=-1)
350
+ query = torch.cat([q_rot, q_pass], dim=-1)
351
+ else:
352
+ key = apply_rotary_pos_emb(key, sin, cos)
353
+ query = apply_rotary_pos_emb(query, sin, cos)
354
+
355
+ # transpose to have the desired shape
356
+ # before transpose: batch_size x seq_length x num_attention_heads x head_dim
357
+ # after transpose: batch_size x num_attention_heads x seq_length x head_dim
358
+ key = key.permute(0, 2, 1, 3)
359
+ query = query.permute(0, 2, 1, 3)
360
+ # value: batch_size x num_attention_heads x seq_length x head_dim
361
+
362
+ if layer_past is not None:
363
+ past_key = layer_past[0]
364
+ past_value = layer_past[1]
365
+ key = torch.cat((past_key, key), dim=-2)
366
+ value = torch.cat((past_value, value), dim=-2)
367
+
368
+ if use_cache is True:
369
+ # Note that this cast is quite ugly, but is not implemented before ROPE as the original codebase keeps the key in float32 all along the computation.
370
+ # Reference: https://github.com/kingoflolz/mesh-transformer-jax/blob/f8315e3003033b23f21d78361b288953064e0e76/mesh_transformer/layers.py#L128
371
+ present = (key.to(hidden_states.dtype), value)
372
+ else:
373
+ present = None
374
+
375
+ # The Flash attention requires the input to have the shape
376
+ # batch_size x seq_length x num_attention_heads x head_dim
377
+ # therefore we need to keep the original shape for query and key, and reshape value
378
+ # to have the correct shape.
379
+ key = key.permute(0, 2, 1, 3).contiguous()
380
+ query = query.permute(0, 2, 1, 3).contiguous()
381
+ value = value.permute(0, 2, 1, 3).contiguous()
382
+
383
+ # In PEFT, usually we cast the layer norms in float32 for training stability reasons
384
+ # therefore the input hidden states get silently cast to float32. Hence, we need to
385
+ # cast them back to the correct dtype, just to be sure everything works as expected.
386
+ # This might slow down training & inference, so it is recommended not to cast the LayerNorms
387
+ # in fp32. (LlamaRMSNorm handles it correctly)
388
+
389
+ input_dtype = query.dtype
390
+ if input_dtype == torch.float32:
391
+ if torch.is_autocast_enabled():
392
+ target_dtype = torch.get_autocast_gpu_dtype()
393
+ # Handle the case where the model is quantized
394
+ elif hasattr(self.config, "_pre_quantization_dtype"):
395
+ target_dtype = self.config._pre_quantization_dtype
396
+ else:
397
+ target_dtype = self.q_proj.weight.dtype
398
+
399
+ logger.warning_once(
400
+ f"The input hidden states seem to be silently cast to float32; this might be related to"
401
+ f" the fact that you have upcast embedding or layer norm layers to float32. We will cast the input back to"
402
+ f" {target_dtype}."
403
+ )
404
+
405
+ query = query.to(target_dtype)
406
+ key = key.to(target_dtype)
407
+ value = value.to(target_dtype)
408
+
409
+ attention_dropout = self.config.attn_pdrop if self.training else 0.0 # attn_pdrop in gptj
410
+
411
+ query_length = query.shape[1]
412
+
413
+ # Compute attention
414
+ attn_weights = self._flash_attention_forward(
415
+ query,
416
+ key,
417
+ value,
418
+ attention_mask,
419
+ query_length,
420
+ dropout=attention_dropout,
421
+ )
422
+
423
+ # Reshape outputs
424
+ attn_output = attn_weights.reshape(
425
+ attn_weights.shape[0], attn_weights.shape[1], attn_weights.shape[2] * attn_weights.shape[3]
426
+ )
427
+ attn_output = self.out_proj(attn_output)
428
+ attn_output = self.resid_dropout(attn_output)
429
+
430
+ outputs = (attn_output, present)
431
+ if output_attentions:
432
+ outputs += (attn_weights,)
433
+
434
+ return outputs
435
+
436
+ # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2._flash_attention_forward
437
+ def _flash_attention_forward(
438
+ self, query_states, key_states, value_states, attention_mask, query_length, dropout=0.0, softmax_scale=None
439
+ ):
440
+ """
441
+ Calls the forward method of Flash Attention. If the input hidden states contain at least one padding token,
442
+ it first unpads the input, then computes the attention scores and pads the final attention scores back.
443
+
444
+ Args:
445
+ query_states (`torch.Tensor`):
446
+ Input query states to be passed to Flash Attention API
447
+ key_states (`torch.Tensor`):
448
+ Input key states to be passed to Flash Attention API
449
+ value_states (`torch.Tensor`):
450
+ Input value states to be passed to Flash Attention API
451
+ attention_mask (`torch.Tensor`):
452
+ The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the
453
+ position of padding tokens and 1 for the position of non-padding tokens.
454
+ dropout (`float`):
455
+ Attention dropout
456
+ softmax_scale (`float`, *optional*):
457
+ The scaling of QK^T before applying softmax. Defaults to `1 / sqrt(head_dim)`.
458
+ """
459
+ if not self._flash_attn_uses_top_left_mask:
460
+ causal = self.is_causal
461
+ else:
462
+ # TODO: Remove the `query_length != 1` check once Flash Attention for RoCm is bumped to 2.1. For details, please see the comment in LlamaFlashAttention2 __init__.
463
+ causal = self.is_causal and query_length != 1
464
+
465
+ # Contains at least one padding token in the sequence
466
+ if attention_mask is not None:
467
+ batch_size = query_states.shape[0]
468
+ query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input(
469
+ query_states, key_states, value_states, attention_mask, query_length
470
+ )
471
+
472
+ cu_seqlens_q, cu_seqlens_k = cu_seq_lens
473
+ max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens
474
+
475
+ attn_output_unpad = flash_attn_varlen_func(
476
+ query_states,
477
+ key_states,
478
+ value_states,
479
+ cu_seqlens_q=cu_seqlens_q,
480
+ cu_seqlens_k=cu_seqlens_k,
481
+ max_seqlen_q=max_seqlen_in_batch_q,
482
+ max_seqlen_k=max_seqlen_in_batch_k,
483
+ dropout_p=dropout,
484
+ softmax_scale=softmax_scale,
485
+ causal=causal,
486
+ )
487
+
488
+ attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length)
489
+ else:
490
+ attn_output = flash_attn_func(
491
+ query_states, key_states, value_states, dropout, softmax_scale=softmax_scale, causal=causal
492
+ )
493
+
494
+ return attn_output
495
+
496
+ # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2._upad_input with num_heads->num_attention_heads
497
+ def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length):
498
+ indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask)
499
+ batch_size, kv_seq_len, num_key_value_heads, head_dim = key_layer.shape
500
+
501
+ key_layer = index_first_axis(
502
+ key_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
503
+ )
504
+ value_layer = index_first_axis(
505
+ value_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
506
+ )
507
+ if query_length == kv_seq_len:
508
+ query_layer = index_first_axis(
509
+ query_layer.reshape(batch_size * kv_seq_len, self.num_attention_heads, head_dim), indices_k
510
+ )
511
+ cu_seqlens_q = cu_seqlens_k
512
+ max_seqlen_in_batch_q = max_seqlen_in_batch_k
513
+ indices_q = indices_k
514
+ elif query_length == 1:
515
+ max_seqlen_in_batch_q = 1
516
+ cu_seqlens_q = torch.arange(
517
+ batch_size + 1, dtype=torch.int32, device=query_layer.device
518
+ ) # There is a memcpy here, that is very bad.
519
+ indices_q = cu_seqlens_q[:-1]
520
+ query_layer = query_layer.squeeze(1)
521
+ else:
522
+ # The -q_len: slice assumes left padding.
523
+ attention_mask = attention_mask[:, -query_length:]
524
+ query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask)
525
+
526
+ return (
527
+ query_layer,
528
+ key_layer,
529
+ value_layer,
530
+ indices_q,
531
+ (cu_seqlens_q, cu_seqlens_k),
532
+ (max_seqlen_in_batch_q, max_seqlen_in_batch_k),
533
+ )
534
+
535
+
536
+ GPTJ_ATTENTION_CLASSES = {
537
+ "eager": GPTJAttention,
538
+ "flash_attention_2": GPTJFlashAttention2,
539
+ }
540
+
541
+
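# A minimal sketch of how the mapping above is consumed: `GPTJBlock` below instantiates
# GPTJ_ATTENTION_CLASSES[config._attn_implementation](config). In user code the backend is usually
# selected when loading the model, e.g. (the keyword is the standard Transformers `from_pretrained`
# argument, assumed here rather than defined in this file):
#   GPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B", attn_implementation="flash_attention_2")
_eager_attention_cls = GPTJ_ATTENTION_CLASSES["eager"]  # -> GPTJAttention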
542
+ class GPTJMLP(nn.Module):
543
+ def __init__(self, intermediate_size, config): # in MLP: intermediate_size= 4 * embed_dim
544
+ super().__init__()
545
+ embed_dim = config.n_embd
546
+
547
+ self.fc_in = nn.Linear(embed_dim, intermediate_size)
548
+ self.fc_out = nn.Linear(intermediate_size, embed_dim)
549
+
550
+ self.act = ACT2FN[config.activation_function]
551
+ self.dropout = nn.Dropout(config.resid_pdrop)
552
+
553
+ def forward(self, hidden_states: Optional[torch.FloatTensor]) -> torch.FloatTensor:
554
+ hidden_states = self.fc_in(hidden_states)
555
+ hidden_states = self.act(hidden_states)
556
+ hidden_states = self.fc_out(hidden_states)
557
+ hidden_states = self.dropout(hidden_states)
558
+ return hidden_states
559
+
560
+
561
+ class GPTJBlock(nn.Module):
562
+ def __init__(self, config):
563
+ super().__init__()
564
+ inner_dim = config.n_inner if config.n_inner is not None else 4 * config.n_embd
565
+ self.ln_1 = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)
566
+ self.attn = GPTJ_ATTENTION_CLASSES[config._attn_implementation](config)
567
+ self.mlp = GPTJMLP(inner_dim, config)
568
+
569
+ def forward(
570
+ self,
571
+ hidden_states: Optional[torch.FloatTensor],
572
+ layer_past: Optional[Tuple[torch.Tensor]] = None,
573
+ attention_mask: Optional[torch.FloatTensor] = None,
574
+ position_ids: Optional[torch.LongTensor] = None,
575
+ head_mask: Optional[torch.FloatTensor] = None,
576
+ use_cache: Optional[bool] = False,
577
+ output_attentions: Optional[bool] = False,
578
+ ) -> Union[Tuple[torch.Tensor], Optional[Tuple[torch.Tensor, Tuple[torch.FloatTensor, ...]]]]:
579
+ residual = hidden_states
580
+ hidden_states = self.ln_1(hidden_states)
581
+ attn_outputs = self.attn(
582
+ hidden_states=hidden_states,
583
+ layer_past=layer_past,
584
+ attention_mask=attention_mask,
585
+ position_ids=position_ids,
586
+ head_mask=head_mask,
587
+ use_cache=use_cache,
588
+ output_attentions=output_attentions,
589
+ )
590
+ attn_output = attn_outputs[0] # output_attn: a, present, (attentions)
591
+ outputs = attn_outputs[1:]
592
+
593
+ feed_forward_hidden_states = self.mlp(hidden_states)
594
+ hidden_states = attn_output + feed_forward_hidden_states + residual
595
+
596
+ if use_cache:
597
+ outputs = (hidden_states,) + outputs
598
+ else:
599
+ outputs = (hidden_states,) + outputs[1:]
600
+
601
+ return outputs # hidden_states, present, (attentions)
602
+
603
+
604
+ class GPTJPreTrainedModel(PreTrainedModel):
605
+ """
606
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
607
+ models.
608
+ """
609
+
610
+ config_class = GPTJConfig
611
+ base_model_prefix = "transformer"
612
+ is_parallelizable = True
613
+ supports_gradient_checkpointing = True
614
+ _no_split_modules = ["GPTJBlock"]
615
+ _skip_keys_device_placement = "past_key_values"
616
+ _supports_flash_attn_2 = True
617
+
618
+ def __init__(self, *inputs, **kwargs):
619
+ super().__init__(*inputs, **kwargs)
620
+
621
+ def _init_weights(self, module):
622
+ """Initialize the weights."""
623
+ if isinstance(module, (nn.Linear,)):
624
+ # Slightly different from Mesh Transformer JAX which uses truncated_normal for initialization
625
+ # cf https://github.com/pytorch/pytorch/pull/5617
626
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
627
+ if module.bias is not None:
628
+ module.bias.data.zero_()
629
+ elif isinstance(module, nn.Embedding):
630
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
631
+ if module.padding_idx is not None:
632
+ module.weight.data[module.padding_idx].zero_()
633
+ elif isinstance(module, nn.LayerNorm):
634
+ module.bias.data.zero_()
635
+ module.weight.data.fill_(1.0)
636
+
637
+
638
+ GPTJ_START_DOCSTRING = r"""
639
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
640
+ it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
641
+ behavior.
642
+
643
+ Parameters:
644
+ config ([`GPTJConfig`]): Model configuration class with all the parameters of the model.
645
+ Initializing with a config file does not load the weights associated with the model, only the
646
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
647
+ """
648
+
649
+ GPTJ_INPUTS_DOCSTRING = r"""
650
+ Args:
651
+ input_ids (`torch.LongTensor` of shape `({0})`):
652
+ Indices of input sequence tokens in the vocabulary.
653
+
654
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
655
+ [`PreTrainedTokenizer.__call__`] for details.
656
+
657
+ [What are input IDs?](../glossary#input-ids)
658
+ attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
659
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
660
+
661
+ - 1 for tokens that are **not masked**,
662
+ - 0 for tokens that are **masked**.
663
+
664
+ [What are attention masks?](../glossary#attention-mask)
665
+ token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
666
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
667
+ 1]`:
668
+
669
+ - 0 corresponds to a *sentence A* token,
670
+ - 1 corresponds to a *sentence B* token.
671
+
672
+ [What are token type IDs?](../glossary#token-type-ids)
673
+ position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
674
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
675
+ config.n_positions - 1]`.
676
+
677
+ [What are position IDs?](../glossary#position-ids)
678
+ head_mask (`torch.FloatTensor` of shape `(num_attention_heads,)` or `(n_layer, num_attention_heads)`, *optional*):
679
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
680
+
681
+ - 1 indicates the head is **not masked**,
682
+ - 0 indicates the head is **masked**.
683
+
684
+ inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_dim)`, *optional*):
685
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
686
+ is useful if you want more control over how to convert *input_ids* indices into associated vectors than the
687
+ model's internal embedding lookup matrix.
688
+ output_attentions (`bool`, *optional*):
689
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
690
+ tensors for more detail.
691
+ output_hidden_states (`bool`, *optional*):
692
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
693
+ more detail.
694
+ return_dict (`bool`, *optional*):
695
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
696
+ """
697
+
698
+ PARALLELIZE_DOCSTRING = r"""
699
+ This is an experimental feature and is subject to change at a moment's notice. Uses a device map to distribute
700
+ attention modules of the model across several devices. If no device map is given, it will evenly distribute blocks
701
+ across all devices.
702
+
703
+ Args:
704
+ device_map (`Dict[int, list]`, *optional*, defaults to `None`):
705
+ A dictionary that maps attention modules to devices. Note that the embedding module and LMHead are always
706
+ automatically mapped to the first device (for esoteric reasons). That means that the first device should
707
+ have fewer attention modules mapped to it than other devices. For reference, the GPT-J models have the
708
+ following number of attention modules:
709
+
710
+ - gpt-j-6B: 28
711
+
712
+ Example:
713
+
714
+ ```python
715
+ # Here is an example of a device map on a machine with 4 GPUs using gpt-j-6B, which has a total of 28 attention modules:
716
+ model = GPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")
717
+ device_map = {
718
+ 0: [0, 1, 2, 3, 4, 5, 6],
719
+ 1: [7, 8, 9, 10, 11, 12, 13],
720
+ 2: [14, 15, 16, 17, 18, 19, 20],
721
+ 3: [21, 22, 23, 24, 25, 26, 27],
722
+ }
723
+ model.parallelize(device_map)
724
+ ```
725
+ """
726
+
727
+ DEPARALLELIZE_DOCSTRING = r"""
728
+ Moves the model to CPU from a model parallel state.
729
+
730
+ Example:
731
+
732
+ ```python
733
+ # On a 4 GPU machine with gpt-j-6B:
734
+ model = GPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")
735
+ device_map = {
736
+ 0: [0, 1, 2, 3, 4, 5, 6],
737
+ 1: [7, 8, 9, 10, 11, 12, 13],
738
+ 2: [14, 15, 16, 17, 18, 19, 20],
739
+ 3: [21, 22, 23, 24, 25, 26, 27],
740
+ }
741
+ model.parallelize(device_map) # Splits the model across several devices
742
+ model.deparallelize() # Puts the model back on CPU and cleans up memory by calling torch.cuda.empty_cache()
743
+ ```
744
+ """
745
+
746
+
747
+ @add_start_docstrings(
748
+ "The bare GPT-J Model transformer outputting raw hidden-states without any specific head on top.",
749
+ GPTJ_START_DOCSTRING,
750
+ )
751
+ class GPTJModel(GPTJPreTrainedModel):
752
+ def __init__(self, config):
753
+ super().__init__(config)
754
+
755
+ self.embed_dim = config.n_embd
756
+ self.vocab_size = config.vocab_size
757
+ self.wte = nn.Embedding(config.vocab_size, self.embed_dim)
758
+ self.drop = nn.Dropout(config.embd_pdrop)
759
+ self.h = nn.ModuleList([GPTJBlock(config) for _ in range(config.n_layer)])
760
+ self.ln_f = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon)
761
+
762
+ # Model parallel
763
+ self.model_parallel = False
764
+ self.device_map = None
765
+ self.gradient_checkpointing = False
766
+
767
+ # Initialize weights and apply final processing
768
+ self.post_init()
769
+
770
+ self._use_flash_attention_2 = config._attn_implementation == "flash_attention_2"
771
+
772
+ @add_start_docstrings(PARALLELIZE_DOCSTRING)
773
+ def parallelize(self, device_map=None):
774
+ warnings.warn(
775
+ "`GPTJModel.parallelize` is deprecated and will be removed in v5 of Transformers, you should load your"
776
+ " model with `device_map='balanced'` in the call to `from_pretrained`. You can also provide your own"
777
+ " `device_map`, but it needs to be a dictionary mapping module names to devices, e.g. {'h.0': 0, 'h.1': 1,"
778
+ " ...}",
779
+ FutureWarning,
780
+ )
781
+ # Check validity of device_map
782
+ self.device_map = (
783
+ get_device_map(len(self.h), range(torch.cuda.device_count())) if device_map is None else device_map
784
+ )
785
+ assert_device_map(self.device_map, len(self.h))
786
+ self.model_parallel = True
787
+ self.first_device = "cpu" if "cpu" in self.device_map.keys() else "cuda:" + str(min(self.device_map.keys()))
788
+ self.last_device = "cuda:" + str(max(self.device_map.keys()))
789
+ self.wte = self.wte.to(self.first_device)
790
+ # Load onto devices
791
+ for k, v in self.device_map.items():
792
+ for block in v:
793
+ cuda_device = "cuda:" + str(k)
794
+ self.h[block] = self.h[block].to(cuda_device)
795
+ # ln_f to last
796
+ self.ln_f = self.ln_f.to(self.last_device)
797
+
798
+ @add_start_docstrings(DEPARALLELIZE_DOCSTRING)
799
+ def deparallelize(self):
800
+ warnings.warn(
801
+ "Like `parallelize`, `deparallelize` is deprecated and will be removed in v5 of Transformers.",
802
+ FutureWarning,
803
+ )
804
+ self.model_parallel = False
805
+ self.device_map = None
806
+ self.first_device = "cpu"
807
+ self.last_device = "cpu"
808
+ self.wte = self.wte.to("cpu")
809
+ for index in range(len(self.h)):
810
+ self.h[index] = self.h[index].to("cpu")
811
+ self.ln_f = self.ln_f.to("cpu")
812
+ torch.cuda.empty_cache()
813
+
814
+ def get_input_embeddings(self):
815
+ return self.wte
816
+
817
+ def set_input_embeddings(self, new_embeddings):
818
+ self.wte = new_embeddings
819
+
820
+ @add_start_docstrings_to_model_forward(GPTJ_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
821
+ @add_code_sample_docstrings(
822
+ checkpoint=_CHECKPOINT_FOR_DOC,
823
+ output_type=BaseModelOutputWithPast,
824
+ config_class=_CONFIG_FOR_DOC,
825
+ real_checkpoint=_REAL_CHECKPOINT_FOR_DOC,
826
+ )
827
+ def forward(
828
+ self,
829
+ input_ids: Optional[torch.LongTensor] = None,
830
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
831
+ attention_mask: Optional[torch.FloatTensor] = None,
832
+ token_type_ids: Optional[torch.LongTensor] = None,
833
+ position_ids: Optional[torch.LongTensor] = None,
834
+ head_mask: Optional[torch.FloatTensor] = None,
835
+ inputs_embeds: Optional[torch.FloatTensor] = None,
836
+ use_cache: Optional[bool] = None,
837
+ output_attentions: Optional[bool] = None,
838
+ output_hidden_states: Optional[bool] = None,
839
+ return_dict: Optional[bool] = None,
840
+ ) -> Union[Tuple, BaseModelOutputWithPast]:
841
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
842
+ output_hidden_states = (
843
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
844
+ )
845
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
846
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
847
+
848
+ if input_ids is not None and inputs_embeds is not None:
849
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
850
+ elif input_ids is not None:
851
+ self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
852
+ input_shape = input_ids.size()
853
+ input_ids = input_ids.view(-1, input_shape[-1])
854
+ batch_size = input_ids.shape[0]
855
+ elif inputs_embeds is not None:
856
+ input_shape = inputs_embeds.size()[:-1]
857
+ batch_size = inputs_embeds.shape[0]
858
+ else:
859
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
860
+
861
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
862
+
863
+ if token_type_ids is not None:
864
+ token_type_ids = token_type_ids.view(-1, input_shape[-1])
865
+
866
+ if past_key_values is None:
867
+ past_length = 0
868
+ past_key_values = tuple([None] * len(self.h))
869
+ else:
870
+ past_length = past_key_values[0][0].size(-2)
871
+
872
+ if position_ids is None:
873
+ position_ids = torch.arange(past_length, input_shape[-1] + past_length, dtype=torch.long, device=device)
874
+ position_ids = position_ids.unsqueeze(0)
875
+
876
+ if not self._use_flash_attention_2:
877
+ # Attention mask.
878
+ if attention_mask is not None:
879
+ if batch_size <= 0:
880
+ raise ValueError("batch_size has to be defined and > 0")
881
+ attention_mask = attention_mask.view(batch_size, -1)
882
+ # We create a 3D attention mask from a 2D tensor mask.
883
+ # Sizes are [batch_size, 1, 1, to_seq_length]
884
+ # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
885
+ # this attention mask is more simple than the triangular masking of causal attention
886
+ # used in OpenAI GPT, we just need to prepare the broadcast dimension here.
887
+ attention_mask = attention_mask[:, None, None, :]
888
+
889
+ # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
890
+ # masked positions, this operation will create a tensor which is 0.0 for
891
+ # positions we want to attend and the dtype's smallest value for masked positions.
892
+ # Since we are adding it to the raw scores before the softmax, this is
893
+ # effectively the same as removing these entirely.
894
+ attention_mask = attention_mask.to(dtype=self.dtype) # fp16 compatibility
895
+ attention_mask = (1.0 - attention_mask) * torch.finfo(self.dtype).min
896
+
897
+ # Prepare head mask if needed
898
+ # 1.0 in head_mask indicate we keep the head
899
+ # attention_probs has shape bsz x num_attention_heads x N x N
900
+ # head_mask has shape n_layer x batch x num_attention_heads x N x N
901
+ head_mask = self.get_head_mask(head_mask, self.config.n_layer)
902
+
903
+ if inputs_embeds is None:
904
+ inputs_embeds = self.wte(input_ids)
905
+
906
+ hidden_states = inputs_embeds
907
+
908
+ if token_type_ids is not None:
909
+ token_type_embeds = self.wte(token_type_ids)
910
+ hidden_states = hidden_states + token_type_embeds
911
+
912
+ hidden_states = self.drop(hidden_states)
913
+
914
+ output_shape = (-1,) + input_shape[1:] + (hidden_states.size(-1),)
915
+
916
+ if self.gradient_checkpointing and self.training:
917
+ if use_cache:
918
+ logger.warning_once(
919
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
920
+ )
921
+ use_cache = False
922
+
923
+ presents = () if use_cache else None
924
+ all_self_attentions = () if output_attentions else None
925
+ all_hidden_states = () if output_hidden_states else None
926
+ for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)):
927
+ # Model parallel
928
+ if self.model_parallel:
929
+ torch.cuda.set_device(hidden_states.device)
930
+ # Ensure layer_past is on same device as hidden_states (might not be correct)
931
+ if layer_past is not None:
932
+ layer_past = tuple(past_state.to(hidden_states.device) for past_state in layer_past)
933
+ # Ensure that attention_mask is always on the same device as hidden_states
934
+ if attention_mask is not None:
935
+ attention_mask = attention_mask.to(hidden_states.device)
936
+ if isinstance(head_mask, torch.Tensor):
937
+ head_mask = head_mask.to(hidden_states.device)
938
+ if output_hidden_states:
939
+ all_hidden_states = all_hidden_states + (hidden_states,)
940
+
941
+ if self.gradient_checkpointing and self.training:
942
+ outputs = self._gradient_checkpointing_func(
943
+ block.__call__,
944
+ hidden_states,
945
+ None,
946
+ attention_mask,
947
+ position_ids,
948
+ head_mask[i],
949
+ use_cache,
950
+ output_attentions,
951
+ )
952
+ else:
953
+ outputs = block(
954
+ hidden_states=hidden_states,
955
+ layer_past=layer_past,
956
+ attention_mask=attention_mask,
957
+ position_ids=position_ids,
958
+ head_mask=head_mask[i],
959
+ use_cache=use_cache,
960
+ output_attentions=output_attentions,
961
+ )
962
+
963
+ hidden_states = outputs[0]
964
+ if use_cache is True:
965
+ presents = presents + (outputs[1],)
966
+
967
+ if output_attentions:
968
+ all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],)
969
+
970
+ # Model Parallel: If it's the last layer for that device, put things on the next device
971
+ if self.model_parallel:
972
+ for k, v in self.device_map.items():
973
+ if i == v[-1] and "cuda:" + str(k) != self.last_device:
974
+ hidden_states = hidden_states.to("cuda:" + str(k + 1))
975
+
976
+ hidden_states = self.ln_f(hidden_states)
977
+
978
+ hidden_states = hidden_states.view(output_shape)
979
+ # Add last hidden state
980
+ if output_hidden_states:
981
+ all_hidden_states = all_hidden_states + (hidden_states,)
982
+
983
+ if not return_dict:
984
+ return tuple(v for v in [hidden_states, presents, all_hidden_states, all_self_attentions] if v is not None)
985
+
986
+ return BaseModelOutputWithPast(
987
+ last_hidden_state=hidden_states,
988
+ past_key_values=presents,
989
+ hidden_states=all_hidden_states,
990
+ attentions=all_self_attentions,
991
+ )
992
+
993
+
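# A minimal sketch (shapes assumed for EleutherAI/gpt-j-6B) of the cache layout returned above:
# `past_key_values` holds one (key, value) pair per layer, each of shape
# (batch_size, num_attention_heads, seq_length, head_dim), and the sequence dimension grows by one
# at every generation step.
#
#   out = model(input_ids, use_cache=True)
#   past = out.past_key_values     # tuple of n_layer == 28 pairs
#   past[0][0].shape               # e.g. torch.Size([1, 16, 7, 256]) for a 7-token prompt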
994
+ @add_start_docstrings(
995
+ """
996
+ The GPT-J Model transformer with a language modeling head on top.
997
+ """,
998
+ GPTJ_START_DOCSTRING,
999
+ )
1000
+ class GPTJForCausalLM(GPTJPreTrainedModel):
1001
+ _tied_weights_keys = ["lm_head.weight"]
1002
+
1003
+ def __init__(self, config):
1004
+ super().__init__(config)
1005
+ self.transformer = GPTJModel(config)
1006
+ self.lm_head = nn.Linear(config.n_embd, config.vocab_size)
1007
+
1008
+ # Model parallel
1009
+ self.model_parallel = False
1010
+ self.device_map = None
1011
+
1012
+ # Initialize weights and apply final processing
1013
+ self.post_init()
1014
+
1015
+ @add_start_docstrings(PARALLELIZE_DOCSTRING)
1016
+ def parallelize(self, device_map=None):
1017
+ warnings.warn(
1018
+ "`GPTJForCausalLM.parallelize` is deprecated and will be removed in v5 of Transformers, you should load"
1019
+ " your model with `device_map='balanced'` in the call to `from_pretrained`. You can also provide your own"
1020
+ " `device_map`, but it needs to be a dictionary mapping module names to devices, e.g. {'transformer.h.0':"
1021
+ " 0, 'transformer.h.1': 1, ...}",
1022
+ FutureWarning,
1023
+ )
1024
+ self.device_map = (
1025
+ get_device_map(len(self.transformer.h), range(torch.cuda.device_count()))
1026
+ if device_map is None
1027
+ else device_map
1028
+ )
1029
+ assert_device_map(self.device_map, len(self.transformer.h))
1030
+ self.transformer.parallelize(self.device_map)
1031
+ self.lm_head = self.lm_head.to(self.transformer.first_device)
1032
+ self.model_parallel = True
1033
+
1034
+ @add_start_docstrings(DEPARALLELIZE_DOCSTRING)
1035
+ def deparallelize(self):
1036
+ warnings.warn(
1037
+ "Like `parallelize`, `deparallelize` is deprecated and will be removed in v5 of Transformers.",
1038
+ FutureWarning,
1039
+ )
1040
+ self.transformer.deparallelize()
1041
+ self.transformer = self.transformer.to("cpu")
1042
+ self.lm_head = self.lm_head.to("cpu")
1043
+ self.model_parallel = False
1044
+ torch.cuda.empty_cache()
1045
+
1046
+ def get_output_embeddings(self):
1047
+ return self.lm_head
1048
+
1049
+ def set_output_embeddings(self, new_embeddings):
1050
+ self.lm_head = new_embeddings
1051
+
1052
+ def prepare_inputs_for_generation(self, input_ids, past_key_values=None, inputs_embeds=None, **kwargs):
1053
+ token_type_ids = kwargs.get("token_type_ids", None)
1054
+ # Omit tokens covered by past_key_values
1055
+ if past_key_values:
1056
+ past_length = past_key_values[0][0].shape[2]
1057
+
1058
+ # Some generation methods already pass only the last input ID
1059
+ if input_ids.shape[1] > past_length:
1060
+ remove_prefix_length = past_length
1061
+ else:
1062
+ # Default to old behavior: keep only final ID
1063
+ remove_prefix_length = input_ids.shape[1] - 1
1064
+
1065
+ input_ids = input_ids[:, remove_prefix_length:]
1066
+ if token_type_ids is not None:
1067
+ token_type_ids = token_type_ids[:, -input_ids.shape[1] :]
1068
+
1069
+ attention_mask = kwargs.get("attention_mask", None)
1070
+ position_ids = kwargs.get("position_ids", None)
1071
+
1072
+ if attention_mask is not None and position_ids is None:
1073
+ # create position_ids on the fly for batch generation
1074
+ position_ids = attention_mask.long().cumsum(-1) - 1
1075
+ position_ids.masked_fill_(attention_mask == 0, 1)
1076
+ if past_key_values:
1077
+ position_ids = position_ids[:, -input_ids.shape[1] :]
1078
+
1079
+ # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
1080
+ if inputs_embeds is not None and past_key_values is None:
1081
+ model_inputs = {"inputs_embeds": inputs_embeds}
1082
+ else:
1083
+ model_inputs = {"input_ids": input_ids}
1084
+
1085
+ model_inputs.update(
1086
+ {
1087
+ "past_key_values": past_key_values,
1088
+ "use_cache": kwargs.get("use_cache"),
1089
+ "position_ids": position_ids,
1090
+ "attention_mask": attention_mask,
1091
+ "token_type_ids": token_type_ids,
1092
+ }
1093
+ )
1094
+
1095
+ return model_inputs
1096
+
1097
+ @add_start_docstrings_to_model_forward(GPTJ_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1098
+ @add_code_sample_docstrings(
1099
+ checkpoint=_CHECKPOINT_FOR_DOC,
1100
+ output_type=CausalLMOutputWithPast,
1101
+ config_class=_CONFIG_FOR_DOC,
1102
+ real_checkpoint=_REAL_CHECKPOINT_FOR_DOC,
1103
+ )
1104
+ def forward(
1105
+ self,
1106
+ input_ids: Optional[torch.LongTensor] = None,
1107
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
1108
+ attention_mask: Optional[torch.FloatTensor] = None,
1109
+ token_type_ids: Optional[torch.LongTensor] = None,
1110
+ position_ids: Optional[torch.LongTensor] = None,
1111
+ head_mask: Optional[torch.FloatTensor] = None,
1112
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1113
+ labels: Optional[torch.LongTensor] = None,
1114
+ use_cache: Optional[bool] = None,
1115
+ output_attentions: Optional[bool] = None,
1116
+ output_hidden_states: Optional[bool] = None,
1117
+ return_dict: Optional[bool] = None,
1118
+ ) -> Union[Tuple, CausalLMOutputWithPast]:
1119
+ r"""
1120
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1121
+ Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
1122
+ `labels = input_ids`. Indices are selected in `[-100, 0, ..., config.vocab_size]`. All labels set to `-100`
1123
+ are ignored (masked); the loss is only computed for labels in `[0, ..., config.vocab_size]`.
1124
+ """
1125
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1126
+
1127
+ transformer_outputs = self.transformer(
1128
+ input_ids,
1129
+ past_key_values=past_key_values,
1130
+ attention_mask=attention_mask,
1131
+ token_type_ids=token_type_ids,
1132
+ position_ids=position_ids,
1133
+ head_mask=head_mask,
1134
+ inputs_embeds=inputs_embeds,
1135
+ use_cache=use_cache,
1136
+ output_attentions=output_attentions,
1137
+ output_hidden_states=output_hidden_states,
1138
+ return_dict=return_dict,
1139
+ )
1140
+ hidden_states = transformer_outputs[0]
1141
+
1142
+ # Set device for model parallelism
1143
+ if self.model_parallel:
1144
+ torch.cuda.set_device(self.transformer.first_device)
1145
+ hidden_states = hidden_states.to(self.lm_head.weight.device)
1146
+
1147
+ # make sure sampling in fp16 works correctly and
1148
+ # compute loss in fp32 to match with mesh-tf version
1149
+ # https://github.com/EleutherAI/gpt-neo/blob/89ce74164da2fb16179106f54e2269b5da8db333/models/gpt2/gpt2.py#L179
1150
+ lm_logits = self.lm_head(hidden_states).to(torch.float32)
1151
+
1152
+ loss = None
1153
+ if labels is not None:
1154
+ # move labels to correct device to enable model parallelism
1155
+ labels = labels.to(lm_logits.device)
1156
+ # Shift so that tokens < n predict n
1157
+ shift_logits = lm_logits[..., :-1, :].contiguous()
1158
+ shift_labels = labels[..., 1:].contiguous()
1159
+ # Flatten the tokens
1160
+ loss_fct = CrossEntropyLoss()
1161
+ loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
1162
+
1163
+ loss = loss.to(hidden_states.dtype)
1164
+
1165
+ if not return_dict:
1166
+ output = (lm_logits,) + transformer_outputs[1:]
1167
+ return ((loss,) + output) if loss is not None else output
1168
+
1169
+ return CausalLMOutputWithPast(
1170
+ loss=loss,
1171
+ logits=lm_logits,
1172
+ past_key_values=transformer_outputs.past_key_values,
1173
+ hidden_states=transformer_outputs.hidden_states,
1174
+ attentions=transformer_outputs.attentions,
1175
+ )
1176
+
1177
+ @staticmethod
1178
+ def _reorder_cache(
1179
+ past_key_values: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor
1180
+ ) -> Tuple[Tuple[torch.Tensor]]:
1181
+ """
1182
+ This function is used to re-order the `past_key_values` cache if [`~PretrainedModel.beam_search`] or
1183
+ [`~PretrainedModel.beam_sample`] is called. This is required to match `past_key_values` with the correct
1184
+ beam_idx at every generation step.
1185
+ """
1186
+ return tuple(
1187
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past)
1188
+ for layer_past in past_key_values
1189
+ )
1190
+
1191
+
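# A minimal usage sketch for GPTJForCausalLM above. The checkpoint name comes from
# _REAL_CHECKPOINT_FOR_DOC; the dtype, device and generation settings are assumptions.
#
#   from transformers import AutoTokenizer, GPTJForCausalLM
#
#   tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-j-6B")
#   model = GPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B", torch_dtype=torch.float16).to("cuda")
#   inputs = tokenizer("The GPT-J architecture is", return_tensors="pt").to("cuda")
#   output_ids = model.generate(**inputs, max_new_tokens=20)
#   print(tokenizer.decode(output_ids[0], skip_special_tokens=True))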
1192
+ @add_start_docstrings(
1193
+ """
1194
+ The GPT-J Model transformer with a sequence classification head on top (linear layer).
1195
+
1196
+ [`GPTJForSequenceClassification`] uses the last token in order to do the classification, as other causal models
1197
+ (e.g. GPT, GPT-2, GPT-Neo) do.
1198
+
1199
+ Since it does classification on the last token, it needs to know the position of the last token. If a
1200
+ `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
1201
+ no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
1202
+ padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
1203
+ each row of the batch).
1204
+ """,
1205
+ GPTJ_START_DOCSTRING,
1206
+ )
1207
+ class GPTJForSequenceClassification(GPTJPreTrainedModel):
1208
+ def __init__(self, config):
1209
+ super().__init__(config)
1210
+ self.num_labels = config.num_labels
1211
+ self.transformer = GPTJModel(config)
1212
+ self.score = nn.Linear(config.n_embd, self.num_labels, bias=False)
1213
+
1214
+ # Model parallel
1215
+ self.model_parallel = False
1216
+ self.device_map = None
1217
+
1218
+ # Initialize weights and apply final processing
1219
+ self.post_init()
1220
+
1221
+ @add_start_docstrings_to_model_forward(GPTJ_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1222
+ @add_code_sample_docstrings(
1223
+ checkpoint="ydshieh/tiny-random-gptj-for-sequence-classification",
1224
+ output_type=SequenceClassifierOutputWithPast,
1225
+ config_class=_CONFIG_FOR_DOC,
1226
+ real_checkpoint=_REAL_CHECKPOINT_FOR_DOC,
1227
+ )
1228
+ def forward(
1229
+ self,
1230
+ input_ids: Optional[torch.LongTensor] = None,
1231
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
1232
+ attention_mask: Optional[torch.FloatTensor] = None,
1233
+ token_type_ids: Optional[torch.LongTensor] = None,
1234
+ position_ids: Optional[torch.LongTensor] = None,
1235
+ head_mask: Optional[torch.FloatTensor] = None,
1236
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1237
+ labels: Optional[torch.LongTensor] = None,
1238
+ use_cache: Optional[bool] = None,
1239
+ output_attentions: Optional[bool] = None,
1240
+ output_hidden_states: Optional[bool] = None,
1241
+ return_dict: Optional[bool] = None,
1242
+ ) -> Union[Tuple, SequenceClassifierOutputWithPast]:
1243
+ r"""
1244
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1245
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1246
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
1247
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1248
+ """
1249
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1250
+
1251
+ transformer_outputs = self.transformer(
1252
+ input_ids,
1253
+ past_key_values=past_key_values,
1254
+ attention_mask=attention_mask,
1255
+ token_type_ids=token_type_ids,
1256
+ position_ids=position_ids,
1257
+ head_mask=head_mask,
1258
+ inputs_embeds=inputs_embeds,
1259
+ use_cache=use_cache,
1260
+ output_attentions=output_attentions,
1261
+ output_hidden_states=output_hidden_states,
1262
+ return_dict=return_dict,
1263
+ )
1264
+ hidden_states = transformer_outputs[0]
1265
+ logits = self.score(hidden_states)
1266
+
1267
+ if input_ids is not None:
1268
+ batch_size = input_ids.shape[0]
1269
+ else:
1270
+ batch_size = inputs_embeds.shape[0]
1271
+
1272
+ if self.config.pad_token_id is None and batch_size != 1:
1273
+ raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
1274
+ if self.config.pad_token_id is None:
1275
+ sequence_lengths = -1
1276
+ else:
1277
+ if input_ids is not None:
1278
+ # if no pad token found, use modulo instead of reverse indexing for ONNX compatibility
1279
+ sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1
1280
+ sequence_lengths = sequence_lengths % input_ids.shape[-1]
1281
+ sequence_lengths = sequence_lengths.to(logits.device)
1282
+ else:
1283
+ sequence_lengths = -1
1284
+ logger.warning(
1285
+ f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be "
1286
+ "unexpected if using padding tokens in conjunction with `inputs_embeds.`"
1287
+ )
1288
+
1289
+ pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths]
1290
+
1291
+ loss = None
1292
+ if labels is not None:
1293
+ labels = labels.to(pooled_logits.device)
1294
+ if self.config.problem_type is None:
1295
+ if self.num_labels == 1:
1296
+ self.config.problem_type = "regression"
1297
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
1298
+ self.config.problem_type = "single_label_classification"
1299
+ else:
1300
+ self.config.problem_type = "multi_label_classification"
1301
+
1302
+ if self.config.problem_type == "regression":
1303
+ loss_fct = MSELoss()
1304
+ if self.num_labels == 1:
1305
+ loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
1306
+ else:
1307
+ loss = loss_fct(pooled_logits, labels)
1308
+ elif self.config.problem_type == "single_label_classification":
1309
+ loss_fct = CrossEntropyLoss()
1310
+ loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
1311
+ elif self.config.problem_type == "multi_label_classification":
1312
+ loss_fct = BCEWithLogitsLoss()
1313
+ loss = loss_fct(pooled_logits, labels)
1314
+ if not return_dict:
1315
+ output = (pooled_logits,) + transformer_outputs[1:]
1316
+ return ((loss,) + output) if loss is not None else output
1317
+
1318
+ return SequenceClassifierOutputWithPast(
1319
+ loss=loss,
1320
+ logits=pooled_logits,
1321
+ past_key_values=transformer_outputs.past_key_values,
1322
+ hidden_states=transformer_outputs.hidden_states,
1323
+ attentions=transformer_outputs.attentions,
1324
+ )
1325
+
1326
+
1327
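A minimal sketch of the last-non-padding-token pooling used by the classification head above, with a made-up `pad_token_id` and batch; the modulo keeps rows without padding ONNX-exportable, as the code comment notes.

import torch

pad_token_id = 0  # made-up; in practice this comes from the model config
input_ids = torch.tensor([[5, 6, 7, 0, 0],   # three real tokens, two pads
                          [8, 9, 1, 2, 3]])  # no padding at all
# Index of the first pad token minus one; argmax returns 0 when no pad is found,
# so the modulo maps the resulting -1 to the last position of the row.
sequence_lengths = torch.eq(input_ids, pad_token_id).int().argmax(-1) - 1
sequence_lengths = sequence_lengths % input_ids.shape[-1]
print(sequence_lengths)  # tensor([2, 4])

logits = torch.randn(2, 5, 3)  # (batch, seq, num_labels), as produced by `self.score`
pooled_logits = logits[torch.arange(2), sequence_lengths]  # one row of logits per example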
+ @add_start_docstrings(
1328
+ """
1329
+ The GPT-J Model transformer with a span classification head on top for extractive question-answering tasks like
1330
+ SQuAD (a linear layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
1331
+ """,
1332
+ GPTJ_START_DOCSTRING,
1333
+ )
1334
+ class GPTJForQuestionAnswering(GPTJPreTrainedModel):
1335
+ def __init__(self, config):
1336
+ super().__init__(config)
1337
+ self.num_labels = config.num_labels
1338
+ self.transformer = GPTJModel(config)
1339
+ self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
1340
+
1341
+ # Model parallel
1342
+ self.model_parallel = False
1343
+ self.device_map = None
1344
+
1345
+ # Initialize weights and apply final processing
1346
+ self.post_init()
1347
+
1348
+ @add_start_docstrings_to_model_forward(GPTJ_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1349
+ @add_code_sample_docstrings(
1350
+ checkpoint=_CHECKPOINT_FOR_DOC,
1351
+ output_type=QuestionAnsweringModelOutput,
1352
+ config_class=_CONFIG_FOR_DOC,
1353
+ real_checkpoint=_REAL_CHECKPOINT_FOR_DOC,
1354
+ )
1355
+ def forward(
1356
+ self,
1357
+ input_ids: Optional[torch.LongTensor] = None,
1358
+ attention_mask: Optional[torch.FloatTensor] = None,
1359
+ token_type_ids: Optional[torch.LongTensor] = None,
1360
+ position_ids: Optional[torch.LongTensor] = None,
1361
+ head_mask: Optional[torch.FloatTensor] = None,
1362
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1363
+ start_positions: Optional[torch.LongTensor] = None,
1364
+ end_positions: Optional[torch.LongTensor] = None,
1365
+ output_attentions: Optional[bool] = None,
1366
+ output_hidden_states: Optional[bool] = None,
1367
+ return_dict: Optional[bool] = None,
1368
+ ) -> Union[Tuple, QuestionAnsweringModelOutput]:
1369
+ r"""
1370
+ start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1371
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
1372
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1373
+ are not taken into account for computing the loss.
1374
+ end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1375
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
1376
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1377
+ are not taken into account for computing the loss.
1378
+ """
1379
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1380
+
1381
+ outputs = self.transformer(
1382
+ input_ids,
1383
+ attention_mask=attention_mask,
1384
+ token_type_ids=token_type_ids,
1385
+ position_ids=position_ids,
1386
+ head_mask=head_mask,
1387
+ inputs_embeds=inputs_embeds,
1388
+ output_attentions=output_attentions,
1389
+ output_hidden_states=output_hidden_states,
1390
+ return_dict=return_dict,
1391
+ )
1392
+
1393
+ sequence_output = outputs[0]
1394
+
1395
+ logits = self.qa_outputs(sequence_output)
1396
+ start_logits, end_logits = logits.split(1, dim=-1)
1397
+ start_logits = start_logits.squeeze(-1).contiguous()
1398
+ end_logits = end_logits.squeeze(-1).contiguous()
1399
+
1400
+ total_loss = None
1401
+ if start_positions is not None and end_positions is not None:
1402
+ # If we are on multi-GPU, split adds an extra dimension; squeeze it
1403
+ if len(start_positions.size()) > 1:
1404
+ start_positions = start_positions.squeeze(-1).to(start_logits.device)
1405
+ if len(end_positions.size()) > 1:
1406
+ end_positions = end_positions.squeeze(-1).to(end_logits.device)
1407
+ # sometimes the start/end positions are outside our model inputs, we ignore these terms
1408
+ ignored_index = start_logits.size(1)
1409
+ start_positions = start_positions.clamp(0, ignored_index)
1410
+ end_positions = end_positions.clamp(0, ignored_index)
1411
+
1412
+ loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
1413
+ start_loss = loss_fct(start_logits, start_positions)
1414
+ end_loss = loss_fct(end_logits, end_positions)
1415
+ total_loss = (start_loss + end_loss) / 2
1416
+
1417
+ if not return_dict:
1418
+ output = (start_logits, end_logits) + outputs[2:]
1419
+ return ((total_loss,) + output) if total_loss is not None else output
1420
+
1421
+ return QuestionAnsweringModelOutput(
1422
+ loss=total_loss,
1423
+ start_logits=start_logits,
1424
+ end_logits=end_logits,
1425
+ hidden_states=outputs.hidden_states,
1426
+ attentions=outputs.attentions,
1427
+ )
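A minimal sketch of turning the question-answering head's start/end logits into a token span; the logits here are made up, and a greedy argmax decode is only one possible strategy.

import torch

start_logits = torch.tensor([[0.1, 2.5, 0.3, 0.2]])  # made-up, (batch, seq_len)
end_logits = torch.tensor([[0.0, 0.1, 3.0, 0.4]])
start_index = int(start_logits.argmax(-1))
end_index = int(end_logits.argmax(-1))
answer_span = (start_index, end_index)  # inclusive token indices into the input
print(answer_span)  # (1, 2)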
venv/lib/python3.10/site-packages/transformers/models/gptj/modeling_tf_gptj.py ADDED
@@ -0,0 +1,1099 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The EleutherAI and HuggingFace Teams. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ TF 2.0 GPT-J model."""
16
+
17
+ from __future__ import annotations
18
+
19
+ from typing import Optional, Tuple, Union
20
+
21
+ import numpy as np
22
+ import tensorflow as tf
23
+
24
+ from ...activations_tf import get_tf_activation
25
+ from ...file_utils import (
26
+ add_code_sample_docstrings,
27
+ add_start_docstrings,
28
+ add_start_docstrings_to_model_forward,
29
+ )
30
+ from ...modeling_tf_outputs import (
31
+ TFBaseModelOutputWithPast,
32
+ TFCausalLMOutputWithPast,
33
+ TFQuestionAnsweringModelOutput,
34
+ TFSequenceClassifierOutputWithPast,
35
+ )
36
+ from ...modeling_tf_utils import (
37
+ TFCausalLanguageModelingLoss,
38
+ TFModelInputType,
39
+ TFPreTrainedModel,
40
+ TFQuestionAnsweringLoss,
41
+ TFSequenceClassificationLoss,
42
+ TFSharedEmbeddings,
43
+ get_initializer,
44
+ keras,
45
+ keras_serializable,
46
+ unpack_inputs,
47
+ )
48
+ from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax
49
+ from ...utils import logging
50
+ from .configuration_gptj import GPTJConfig
51
+
52
+
53
+ logger = logging.get_logger(__name__)
54
+
55
+ _CHECKPOINT_FOR_DOC = "EleutherAI/gpt-j-6B"
56
+ _CONFIG_FOR_DOC = "GPTJConfig"
57
+
58
+
59
+ def create_sinusoidal_positions(num_pos: int, dim: int) -> tf.Tensor:
60
+ inv_freq = tf.cast(1.0 / (10000 ** (tf.range(0, dim, 2) / dim)), tf.float32)
61
+ sinusoid_inp = tf.cast(tf.einsum("i , j -> i j", tf.range(num_pos, dtype=tf.float32), inv_freq), tf.float32)
62
+ sin, cos = tf.sin(sinusoid_inp), tf.cos(sinusoid_inp)
63
+ out = tf.concat((sin, cos), axis=1)
64
+ return out
65
+
66
+
67
+ def rotate_every_two(x: tf.Tensor) -> tf.Tensor:
68
+ rotate_half_tensor = tf.stack((-x[:, :, :, 1::2], x[:, :, :, ::2]), axis=-1)
69
+ new_shape = shape_list(rotate_half_tensor)[:-2] + [tf.math.reduce_prod(shape_list(rotate_half_tensor)[-2:])]
70
+ rotate_half_tensor = tf.reshape(rotate_half_tensor, new_shape)
71
+ return rotate_half_tensor
72
+
73
+
74
+ def apply_rotary_pos_emb(tensor: tf.Tensor, sincos: tf.Tensor) -> tf.Tensor:
75
+ sin_pos, cos_pos = sincos
76
+ sin_pos = tf.repeat(sin_pos[:, :, None, :], 2, 3)
77
+ cos_pos = tf.repeat(cos_pos[:, :, None, :], 2, 3)
78
+ return (tensor * cos_pos) + (rotate_every_two(tensor) * sin_pos)
79
+
80
+
81
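A minimal sketch of the per-pair rotation that `apply_rotary_pos_emb` performs on each (even, odd) channel pair; the angle and values are made up, and the point is that the rotation only changes the pair's phase, not its norm.

import numpy as np

theta = 0.3       # made-up angle; in the model it is position / 10000**(2i/rotary_dim)
a, b = 1.0, 2.0   # one (even, odd) channel pair of a query or key vector
rotated = (a * np.cos(theta) - b * np.sin(theta),
           b * np.cos(theta) + a * np.sin(theta))
# A pure 2D rotation: the norm is unchanged, only the phase encodes the position.
assert np.isclose(np.hypot(*rotated), np.hypot(a, b))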
+ class TFGPTJAttention(keras.layers.Layer):
82
+ def __init__(self, config: GPTJConfig, **kwargs):
83
+ super().__init__(**kwargs)
84
+
85
+ self.embed_dim = config.hidden_size
86
+ self.num_attention_heads = config.num_attention_heads
87
+ self.head_dim = self.embed_dim // self.num_attention_heads
88
+ if self.head_dim * self.num_attention_heads != self.embed_dim:
89
+ raise ValueError(
90
+ f"embed_dim must be divisible by num_attention_heads (got `embed_dim`: {self.embed_dim} and"
91
+ f" `num_attention_heads`: {self.num_attention_heads})."
92
+ )
93
+ self.scale_attn = self.head_dim**0.5
94
+ self.rotary_dim = config.rotary_dim
95
+
96
+ self.attn_dropout = keras.layers.Dropout(config.attn_pdrop)
97
+ self.resid_dropout = keras.layers.Dropout(config.resid_pdrop)
98
+
99
+ self.q_proj = keras.layers.Dense(
100
+ self.embed_dim,
101
+ use_bias=False,
102
+ kernel_initializer=get_initializer(config.initializer_range),
103
+ name="q_proj",
104
+ )
105
+ self.k_proj = keras.layers.Dense(
106
+ self.embed_dim,
107
+ use_bias=False,
108
+ kernel_initializer=get_initializer(config.initializer_range),
109
+ name="k_proj",
110
+ )
111
+ self.v_proj = keras.layers.Dense(
112
+ self.embed_dim,
113
+ use_bias=False,
114
+ kernel_initializer=get_initializer(config.initializer_range),
115
+ name="v_proj",
116
+ )
117
+ self.out_proj = keras.layers.Dense(
118
+ self.embed_dim,
119
+ use_bias=False,
120
+ kernel_initializer=get_initializer(config.initializer_range),
121
+ name="out_proj",
122
+ )
123
+
124
+ self.max_positions = config.max_position_embeddings
125
+ self.lower_triangle_mask = tf.reshape(
126
+ tf.cast(tf.experimental.numpy.tril(tf.ones((self.max_positions, self.max_positions))), tf.int8),
127
+ (1, 1, self.max_positions, self.max_positions),
128
+ )
129
+ pos_embd_dim = self.rotary_dim or self.embed_dim
130
+ self.embed_positions = create_sinusoidal_positions(self.max_positions, pos_embd_dim)
131
+
132
+ def get_causal_mask(self, key_length, query_length) -> tf.Tensor:
133
+ return tf.cast(self.lower_triangle_mask[:, :, key_length - query_length : key_length, :key_length], tf.bool)
134
+
135
+ @staticmethod
136
+ def get_masked_bias(dtype: tf.DType) -> tf.Tensor:
137
+ return tf.cast(tf.constant(-1e9), dtype)
138
+
139
+ def _split_heads(self, hidden_states: tf.Tensor, rotary: bool) -> tf.Tensor:
140
+ """
141
+ Splits hidden dim into attn_head_size and num_attention_heads
142
+ """
143
+ new_shape = shape_list(hidden_states)[:-1] + [self.num_attention_heads, self.head_dim]
144
+ hidden_states = tf.reshape(hidden_states, new_shape)
145
+ if rotary:
146
+ return hidden_states
147
+ if len(shape_list(hidden_states)) == 4:
148
+ return tf.transpose(hidden_states, (0, 2, 1, 3)) # (batch, head, seq_length, head_features)
149
+ if len(shape_list(hidden_states)) == 5:
150
+ return tf.transpose(hidden_states, (0, 1, 3, 2, 4)) # (batch, blocks, head, block_length, head_features)
151
+ raise ValueError(f"Input tensor rank should be one of [4, 5], but is: {len(shape_list(hidden_states))}")
152
+
153
+ def _merge_heads(self, hidden_states: tf.Tensor) -> tf.Tensor:
154
+ """
155
+ Merges attn_head_size dim and num_attn_heads dim into hidden dim
156
+ """
157
+ if len(shape_list(hidden_states)) == 4:
158
+ hidden_states = tf.transpose(hidden_states, (0, 2, 1, 3))
159
+ elif len(shape_list(hidden_states)) == 5:
160
+ hidden_states = tf.transpose(hidden_states, (0, 1, 3, 2, 4))
161
+ else:
162
+ raise ValueError(f"Input tensor rank should be one of [4, 5], but is: {len(shape_list(hidden_states))}")
163
+ new_shape = shape_list(hidden_states)[:-2] + [self.num_attention_heads * self.head_dim]
164
+ return tf.reshape(hidden_states, new_shape)
165
+
166
+ def _attn(
167
+ self,
168
+ query: tf.Tensor,
169
+ key: tf.Tensor,
170
+ value: tf.Tensor,
171
+ attention_mask: tf.Tensor | None = None,
172
+ head_mask: tf.Tensor | None = None,
173
+ ) -> Tuple[tf.Tensor, tf.Tensor]:
174
+ # compute causal mask from causal mask buffer
175
+ query_length, key_length = shape_list(query)[-2], shape_list(key)[-2]
176
+ causal_mask = self.get_causal_mask(key_length, query_length)
177
+
178
+ # Keep the attention weights computation in fp32 to avoid overflow issues
179
+ query = tf.cast(query, tf.float32)
180
+ key = tf.cast(key, tf.float32)
181
+
182
+ attn_weights = tf.matmul(query, key, transpose_b=True)
183
+ attn_weights = tf.where(causal_mask, attn_weights, self.get_masked_bias(attn_weights.dtype))
184
+
185
+ attn_weights = attn_weights / self.scale_attn
186
+
187
+ if attention_mask is not None:
188
+ # Apply the attention mask
189
+ attn_weights = attn_weights + attention_mask
190
+
191
+ attn_weights = stable_softmax(attn_weights, axis=-1)
192
+ attn_weights = tf.cast(attn_weights, value.dtype)
193
+ attn_weights = self.attn_dropout(attn_weights)
194
+
195
+ # Mask heads if we want to
196
+ if head_mask is not None:
197
+ attn_weights = attn_weights * head_mask
198
+
199
+ attn_output = tf.matmul(attn_weights, value)
200
+
201
+ return attn_output, attn_weights
202
+
203
+ def call(
204
+ self,
205
+ hidden_states: tf.Tensor,
206
+ layer_past: Optional[Tuple[tf.Tensor, tf.Tensor]] = None,
207
+ attention_mask: tf.Tensor | None = None,
208
+ position_ids: tf.Tensor | None = None,
209
+ head_mask: tf.Tensor | None = None,
210
+ use_cache: bool = False,
211
+ output_attentions: bool = False,
212
+ ):
213
+ query = self.q_proj(hidden_states)
214
+ key = self.k_proj(hidden_states)
215
+ value = self.v_proj(hidden_states)
216
+
217
+ query = self._split_heads(query, True)
218
+ key = self._split_heads(key, True)
219
+ value = self._split_heads(value, False)
220
+
221
+ sincos = tf.cast(tf.gather(self.embed_positions, position_ids, axis=0), hidden_states.dtype)
222
+ sincos = tf.split(sincos, 2, axis=-1)
223
+ if self.rotary_dim is not None:
224
+ k_rot = key[:, :, :, : self.rotary_dim]
225
+ k_pass = key[:, :, :, self.rotary_dim :]
226
+
227
+ q_rot = query[:, :, :, : self.rotary_dim]
228
+ q_pass = query[:, :, :, self.rotary_dim :]
229
+
230
+ k_rot = apply_rotary_pos_emb(k_rot, sincos)
231
+ q_rot = apply_rotary_pos_emb(q_rot, sincos)
232
+
233
+ key = tf.concat((k_rot, k_pass), axis=-1)
234
+ query = tf.concat((q_rot, q_pass), axis=-1)
235
+ else:
236
+ key = apply_rotary_pos_emb(key, sincos)
237
+ query = apply_rotary_pos_emb(query, sincos)
238
+
239
+ key = tf.transpose(key, (0, 2, 1, 3))
240
+ query = tf.transpose(query, (0, 2, 1, 3))
241
+
242
+ if layer_past is not None:
243
+ past_key = layer_past[0]
244
+ past_value = layer_past[1]
245
+ key = tf.concat((past_key, key), axis=-2)
246
+ value = tf.concat((past_value, value), axis=-2)
247
+
248
+ if use_cache is True:
249
+ present = (key, value)
250
+ else:
251
+ present = None
252
+
253
+ # compute self-attention: V x Softmax(QK^T)
254
+ attn_output, attn_weights = self._attn(query, key, value, attention_mask, head_mask)
255
+
256
+ attn_output = self._merge_heads(attn_output)
257
+ attn_output = self.out_proj(attn_output)
258
+ attn_output = self.resid_dropout(attn_output)
259
+
260
+ outputs = (attn_output, present)
261
+ if output_attentions:
262
+ outputs += (attn_weights,)
263
+
264
+ return outputs # a, present, (attentions)
265
+
266
+ def build(self, input_shape=None):
267
+ if self.built:
268
+ return
269
+ self.built = True
270
+ if getattr(self, "q_proj", None) is not None:
271
+ with tf.name_scope(self.q_proj.name):
272
+ self.q_proj.build([None, None, self.embed_dim])
273
+ if getattr(self, "k_proj", None) is not None:
274
+ with tf.name_scope(self.k_proj.name):
275
+ self.k_proj.build([None, None, self.embed_dim])
276
+ if getattr(self, "v_proj", None) is not None:
277
+ with tf.name_scope(self.v_proj.name):
278
+ self.v_proj.build([None, None, self.embed_dim])
279
+ if getattr(self, "out_proj", None) is not None:
280
+ with tf.name_scope(self.out_proj.name):
281
+ self.out_proj.build([None, None, self.embed_dim])
282
+
283
+
284
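A minimal sketch of the causal masking step inside `_attn`, using `tf.linalg.band_part` and `tf.nn.softmax` as stand-ins for the layer's precomputed lower-triangular mask and `stable_softmax`; shapes are made up.

import tensorflow as tf

query_length = key_length = 3
scores = tf.random.normal((1, 1, query_length, key_length))  # (batch, head, q, k), made up
causal_mask = tf.cast(tf.linalg.band_part(tf.ones((query_length, key_length)), -1, 0), tf.bool)
masked_scores = tf.where(causal_mask, scores, tf.constant(-1e9))
weights = tf.nn.softmax(masked_scores, axis=-1)  # future positions receive ~0 attention weight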
+ class TFGPTJMLP(keras.layers.Layer):
285
+ def __init__(self, intermediate_size: int, config: GPTJConfig, **kwargs):
286
+ super().__init__(**kwargs)
287
+ embed_dim = config.n_embd
288
+
289
+ self.fc_in = keras.layers.Dense(
290
+ intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name="fc_in"
291
+ )
292
+ self.fc_out = keras.layers.Dense(
293
+ embed_dim, kernel_initializer=get_initializer(config.initializer_range), name="fc_out"
294
+ )
295
+
296
+ self.act = get_tf_activation(config.activation_function)
297
+ self.dropout = keras.layers.Dropout(config.embd_pdrop)
298
+ self.embed_dim = config.n_embd
299
+ self.intermediate_size = intermediate_size
300
+
301
+ def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
302
+ hidden_states = self.fc_in(hidden_states)
303
+ hidden_states = self.act(hidden_states)
304
+ hidden_states = self.fc_out(hidden_states)
305
+ hidden_states = self.dropout(hidden_states)
306
+ return hidden_states
307
+
308
+ def build(self, input_shape=None):
309
+ if self.built:
310
+ return
311
+ self.built = True
312
+ if getattr(self, "fc_in", None) is not None:
313
+ with tf.name_scope(self.fc_in.name):
314
+ self.fc_in.build([None, None, self.embed_dim])
315
+ if getattr(self, "fc_out", None) is not None:
316
+ with tf.name_scope(self.fc_out.name):
317
+ self.fc_out.build([None, None, self.intermediate_size])
318
+
319
+
320
+ class TFGPTJBlock(keras.layers.Layer):
321
+ def __init__(self, config: GPTJConfig, **kwargs):
322
+ super().__init__(**kwargs)
323
+ inner_dim = config.n_inner if config.n_inner is not None else 4 * config.n_embd
324
+ self.ln_1 = keras.layers.LayerNormalization(epsilon=config.layer_norm_epsilon, name="ln_1")
325
+ self.attn = TFGPTJAttention(config, name="attn")
326
+ self.mlp = TFGPTJMLP(inner_dim, config, name="mlp")
327
+ self.config = config
328
+
329
+ def call(
330
+ self,
331
+ hidden_states: tf.Tensor,
332
+ layer_past: tf.Tensor | None = None,
333
+ attention_mask: tf.Tensor | None = None,
334
+ position_ids: tf.Tensor | None = None,
335
+ head_mask: tf.Tensor | None = None,
336
+ use_cache: bool = False,
337
+ output_attentions: bool = False,
338
+ ):
339
+ residual = hidden_states
340
+ hidden_states = self.ln_1(hidden_states)
341
+ attn_outputs = self.attn(
342
+ hidden_states=hidden_states,
343
+ layer_past=layer_past,
344
+ attention_mask=attention_mask,
345
+ position_ids=position_ids,
346
+ head_mask=head_mask,
347
+ use_cache=use_cache,
348
+ output_attentions=output_attentions,
349
+ ) # attn_outputs: attn_output, present, (attentions)
350
+ attn_output = attn_outputs[0]
351
+ outputs = attn_outputs[1:]
352
+
353
+ feed_forward_hidden_states = self.mlp(hidden_states)
354
+ hidden_states = attn_output + feed_forward_hidden_states + residual
355
+
356
+ if use_cache:
357
+ outputs = (hidden_states,) + outputs
358
+ else:
359
+ outputs = (hidden_states,) + outputs[1:]
360
+ return outputs # hidden_states, present, (attentions)
361
+
362
+ def build(self, input_shape=None):
363
+ if self.built:
364
+ return
365
+ self.built = True
366
+ if getattr(self, "ln_1", None) is not None:
367
+ with tf.name_scope(self.ln_1.name):
368
+ self.ln_1.build([None, None, self.config.n_embd])
369
+ if getattr(self, "attn", None) is not None:
370
+ with tf.name_scope(self.attn.name):
371
+ self.attn.build(None)
372
+ if getattr(self, "mlp", None) is not None:
373
+ with tf.name_scope(self.mlp.name):
374
+ self.mlp.build(None)
375
+
376
+
377
+ @keras_serializable
378
+ class TFGPTJMainLayer(keras.layers.Layer):
379
+ config_class = GPTJConfig
380
+
381
+ def __init__(self, config: GPTJConfig, *inputs, **kwargs):
382
+ super().__init__(*inputs, **kwargs)
383
+
384
+ self.config = config
385
+ self.output_attentions = config.output_attentions
386
+ self.output_hidden_states = config.output_hidden_states
387
+ self.use_cache = config.use_cache
388
+ self.return_dict = config.use_return_dict
389
+
390
+ self.num_hidden_layers = config.n_layer
391
+ self.n_embd = config.n_embd
392
+ self.n_positions = config.n_positions
393
+ self.initializer_range = config.initializer_range
394
+
395
+ self.wte = TFSharedEmbeddings(
396
+ config.vocab_size, config.hidden_size, initializer_range=config.initializer_range, name="wte"
397
+ )
398
+ self.drop = keras.layers.Dropout(config.embd_pdrop)
399
+ self.h = [TFGPTJBlock(config, name=f"h_._{i}") for i in range(config.n_layer)]
400
+ self.ln_f = keras.layers.LayerNormalization(epsilon=config.layer_norm_epsilon, name="ln_f")
401
+ self.embed_dim = config.n_embd
402
+
403
+ def get_input_embeddings(self):
404
+ return self.wte
405
+
406
+ def set_input_embeddings(self, value: tf.Tensor):
407
+ self.wte.weight = value
408
+ self.wte.vocab_size = shape_list(value)[0]
409
+
410
+ def _prune_heads(self, heads_to_prune):
411
+ """
412
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
413
+ """
414
+ raise NotImplementedError
415
+
416
+ @unpack_inputs
417
+ def call(
418
+ self,
419
+ input_ids=None,
420
+ past_key_values=None,
421
+ attention_mask=None,
422
+ token_type_ids=None,
423
+ position_ids=None,
424
+ head_mask=None,
425
+ inputs_embeds=None,
426
+ use_cache=None,
427
+ output_attentions=None,
428
+ output_hidden_states=None,
429
+ return_dict=None,
430
+ training=False,
431
+ ) -> Union[TFBaseModelOutputWithPast, Tuple[tf.Tensor]]:
432
+ if input_ids is not None and inputs_embeds is not None:
433
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
434
+ elif input_ids is not None:
435
+ input_shape = shape_list(input_ids)
436
+ input_ids = tf.reshape(input_ids, [-1, input_shape[-1]])
437
+ elif inputs_embeds is not None:
438
+ input_shape = shape_list(inputs_embeds)[:-1]
439
+ else:
440
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
441
+
442
+ if past_key_values is None:
443
+ past_length = 0
444
+ past_key_values = [None] * len(self.h)
445
+ else:
446
+ past_length = shape_list(past_key_values[0][0])[-2]
447
+
448
+ if position_ids is None:
449
+ position_ids = tf.expand_dims(tf.range(past_length, input_shape[-1] + past_length), axis=0)
450
+
451
+ if attention_mask is not None:
452
+ # We create a 3D attention mask from a 2D tensor mask.
453
+ # Sizes are [batch_size, 1, 1, to_seq_length]
454
+ # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
455
+ # this attention mask is simpler than the triangular masking of causal attention
456
+ # used in OpenAI GPT, we just need to prepare the broadcast dimension here.
457
+ attention_mask_shape = shape_list(attention_mask)
458
+ attention_mask = tf.reshape(attention_mask, (attention_mask_shape[0], 1, 1, attention_mask_shape[1]))
459
+
460
+ # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
461
+ # masked positions, this operation will create a tensor which is 0.0 for
462
+ # positions we want to attend and -10000.0 for masked positions.
463
+ # Since we are adding it to the raw scores before the softmax, this is
464
+ # effectively the same as removing these entirely.
465
+ one_cst = tf.constant(1.0)
466
+ attention_mask = tf.cast(attention_mask, dtype=one_cst.dtype)
467
+ attention_mask = tf.multiply(tf.subtract(one_cst, attention_mask), tf.constant(-10000.0))
468
+
469
+ # Prepare head mask if needed
470
+ # 1.0 in head_mask indicate we keep the head
471
+ # attention_probs has shape bsz x n_heads x N x N
472
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
473
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
474
+ if head_mask is not None:
475
+ raise NotImplementedError
476
+ else:
477
+ head_mask = [None] * self.num_hidden_layers
478
+ # head_mask = tf.constant([0] * self.num_hidden_layers)
479
+
480
+ position_ids = tf.reshape(position_ids, [-1, shape_list(position_ids)[-1]])
481
+
482
+ if inputs_embeds is None:
483
+ check_embeddings_within_bounds(input_ids, self.wte.vocab_size)
484
+ inputs_embeds = self.wte(input_ids, mode="embedding")
485
+
486
+ if token_type_ids is not None:
487
+ token_type_ids = tf.reshape(token_type_ids, [-1, shape_list(token_type_ids)[-1]])
488
+ token_type_embeds = self.wte(token_type_ids, mode="embedding")
489
+ else:
490
+ token_type_embeds = tf.constant(0.0)
491
+
492
+ token_type_embeds = tf.cast(token_type_embeds, dtype=inputs_embeds.dtype)
493
+ hidden_states = inputs_embeds + token_type_embeds
494
+ hidden_states = self.drop(hidden_states, training=training)
495
+
496
+ output_shape = input_shape + [shape_list(hidden_states)[-1]]
497
+
498
+ presents = () if use_cache else None
499
+ all_attentions = () if output_attentions else None
500
+ all_hidden_states = () if output_hidden_states else None
501
+ for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)):
502
+ if output_hidden_states:
503
+ all_hidden_states = all_hidden_states + (tf.reshape(hidden_states, output_shape),)
504
+
505
+ outputs = block(
506
+ hidden_states=hidden_states,
507
+ layer_past=layer_past,
508
+ attention_mask=attention_mask,
509
+ position_ids=position_ids,
510
+ head_mask=head_mask[i],
511
+ use_cache=use_cache,
512
+ output_attentions=output_attentions,
513
+ training=training,
514
+ )
515
+
516
+ hidden_states = outputs[0]
517
+ if use_cache:
518
+ presents = presents + (outputs[1],)
519
+
520
+ if output_attentions:
521
+ all_attentions = all_attentions + (outputs[2 if use_cache else 1],)
522
+
523
+ hidden_states = self.ln_f(hidden_states)
524
+
525
+ hidden_states = tf.reshape(hidden_states, output_shape)
526
+ # Add last hidden state
527
+ if output_hidden_states:
528
+ all_hidden_states = all_hidden_states + (hidden_states,)
529
+
530
+ if output_attentions:
531
+ # let the number of heads free (-1) so we can extract attention even after head pruning
532
+ attention_output_shape = input_shape[:-1] + [-1] + shape_list(all_attentions[0])[-2:]
533
+ all_attentions = tuple(tf.reshape(t, attention_output_shape) for t in all_attentions)
534
+
535
+ if not return_dict:
536
+ return tuple(v for v in [hidden_states, presents, all_hidden_states, all_attentions] if v is not None)
537
+
538
+ return TFBaseModelOutputWithPast(
539
+ last_hidden_state=hidden_states,
540
+ past_key_values=presents,
541
+ hidden_states=all_hidden_states,
542
+ attentions=all_attentions,
543
+ )
544
+
545
+ def build(self, input_shape=None):
546
+ if self.built:
547
+ return
548
+ self.built = True
549
+ if getattr(self, "wte", None) is not None:
550
+ with tf.name_scope(self.wte.name):
551
+ self.wte.build(None)
552
+ if getattr(self, "ln_f", None) is not None:
553
+ with tf.name_scope(self.ln_f.name):
554
+ self.ln_f.build([None, None, self.embed_dim])
555
+ if getattr(self, "h", None) is not None:
556
+ for layer in self.h:
557
+ with tf.name_scope(layer.name):
558
+ layer.build(None)
559
+
560
+
561
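A minimal sketch of how the 2D padding mask is converted into the additive bias applied to the attention scores in the main layer above; the batch and sequence length are made up.

import tensorflow as tf

attention_mask = tf.constant([[1.0, 1.0, 1.0, 0.0, 0.0]])  # 1 = attend, 0 = padding
mask = tf.reshape(attention_mask, (1, 1, 1, 5))            # broadcast over heads and query positions
additive_bias = (1.0 - mask) * -10000.0                    # 0 for real tokens, -10000 for pads
# Adding this bias to the raw scores before the softmax effectively removes padded positions.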
+ class TFGPTJPreTrainedModel(TFPreTrainedModel):
562
+ """
563
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
564
+ models.
565
+ """
566
+
567
+ config_class = GPTJConfig
568
+ base_model_prefix = "transformer"
569
+ # names with a '.' represents the authorized unexpected/missing layers when a TF model is loaded from a PT model
570
+ _keys_to_ignore_on_load_unexpected = [r"h.\d+.attn.bias"]
571
+
572
+
573
+ GPTJ_START_DOCSTRING = r"""
574
+
575
+ This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
576
+ library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
577
+ etc.)
578
+
579
+ This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
580
+ as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and
581
+ behavior.
582
+
583
+ <Tip>
584
+
585
+ TensorFlow models and layers in `transformers` accept two formats as input:
586
+
587
+ - having all inputs as keyword arguments (like PyTorch models), or
588
+ - having all inputs as a list, tuple or dict in the first positional argument.
589
+
590
+ The reason the second format is supported is that Keras methods prefer this format when passing inputs to models
591
+ and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just
592
+ pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second
593
+ format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with
594
+ the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first
595
+ positional argument:
596
+
597
+ - a single Tensor with `input_ids` only and nothing else: `model(input_ids)`
598
+ - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
599
+ `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
600
+ - a dictionary with one or several input Tensors associated to the input names given in the docstring:
601
+ `model({"input_ids": input_ids, "token_type_ids": token_type_ids})`
602
+
603
+ Note that when creating models and layers with
604
+ [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry
605
+ about any of this, as you can just pass inputs like you would to any other Python function!
606
+
607
+ </Tip>
608
+
609
+ Parameters:
610
+ config ([`GPTJConfig`]): Model configuration class with all the parameters of the model.
611
+ Initializing with a config file does not load the weights associated with the model, only the
612
+ configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
613
+ """
614
+
615
+ GPTJ_INPUTS_DOCSTRING = r"""
616
+ Args:
617
+ input_ids (`Numpy array` or `tf.Tensor` of shape `(batch_size, input_ids_length)`):
618
+ `input_ids_length` = `sequence_length` if `past` is `None` else `past[0].shape[-2]` (`sequence_length` of
619
+ input past key value states). Indices of input sequence tokens in the vocabulary.
620
+
621
+ If `past` is used, only input IDs that do not have their past calculated should be passed as `input_ids`.
622
+
623
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and
624
+ [`PreTrainedTokenizer.encode`] for details.
625
+
626
+ [What are input IDs?](../glossary#input-ids)
627
+ past_key_values (`List[tf.Tensor]` of length `config.n_layers`):
628
+ Contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model (see
629
+ `past` output below). Can be used to speed up sequential decoding. The token ids which have their past
630
+ given to this model should not be passed as input ids as they have already been computed.
631
+ attention_mask (`tf.Tensor` or `Numpy array` of shape `(batch_size, sequence_length)`, *optional*):
632
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
633
+
634
+ - 1 for tokens that are **not masked**,
635
+ - 0 for tokens that are **masked**.
636
+
637
+ [What are attention masks?](../glossary#attention-mask)
638
+ token_type_ids (`tf.Tensor` or `Numpy array` of shape `(batch_size, sequence_length)`, *optional*):
639
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
640
+ 1]`:
641
+
642
+ - 0 corresponds to a *sentence A* token,
643
+ - 1 corresponds to a *sentence B* token.
644
+
645
+ [What are token type IDs?](../glossary#token-type-ids)
646
+ position_ids (`tf.Tensor` or `Numpy array` of shape `(batch_size, sequence_length)`, *optional*):
647
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
648
+ config.max_position_embeddings - 1]`.
649
+
650
+ [What are position IDs?](../glossary#position-ids)
651
+ head_mask (`Numpy array` or `tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
652
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
653
+
654
+ - 1 indicates the head is **not masked**,
655
+ - 0 indicates the head is **masked**.
656
+
657
+ inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
658
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
659
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
660
+ model's internal embedding lookup matrix.
661
+ output_attentions (`bool`, *optional*):
662
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
663
+ tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the
664
+ config will be used instead.
665
+ output_hidden_states (`bool`, *optional*):
666
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
667
+ more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
668
+ used instead.
669
+ return_dict (`bool`, *optional*):
670
+ Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple. This argument can be used
671
+ in eager mode, in graph mode the value will always be set to True.
672
+ training (`bool`, *optional*, defaults to `False`):
673
+ Whether or not to use the model in training mode (some modules like dropout modules have different
674
+ behaviors between training and evaluation).
675
+ """
676
+
677
+
678
+ @add_start_docstrings(
679
+ "The bare GPT-J Model transformer outputting raw hidden-states without any specific head on top.",
680
+ GPTJ_START_DOCSTRING,
681
+ )
682
+ class TFGPTJModel(TFGPTJPreTrainedModel):
683
+ def __init__(self, config, *inputs, **kwargs):
684
+ super().__init__(config, *inputs, **kwargs)
685
+ self.transformer = TFGPTJMainLayer(config, name="transformer")
686
+
687
+ @unpack_inputs
688
+ @add_start_docstrings_to_model_forward(GPTJ_INPUTS_DOCSTRING)
689
+ @add_code_sample_docstrings(
690
+ checkpoint=_CHECKPOINT_FOR_DOC,
691
+ output_type=TFBaseModelOutputWithPast,
692
+ config_class=_CONFIG_FOR_DOC,
693
+ )
694
+ def call(
695
+ self,
696
+ input_ids: TFModelInputType | None = None,
697
+ past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
698
+ attention_mask: np.ndarray | tf.Tensor | None = None,
699
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
700
+ position_ids: np.ndarray | tf.Tensor | None = None,
701
+ head_mask: np.ndarray | tf.Tensor | None = None,
702
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
703
+ use_cache: Optional[bool] = None,
704
+ output_attentions: Optional[bool] = None,
705
+ output_hidden_states: Optional[bool] = None,
706
+ return_dict: Optional[bool] = None,
707
+ training: Optional[bool] = False,
708
+ ) -> Union[TFBaseModelOutputWithPast, Tuple[tf.Tensor]]:
709
+ r"""
710
+ use_cache (`bool`, *optional*, defaults to `True`):
711
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
712
+ `past`). Set to `False` during training, `True` during generation
713
+ """
714
+
715
+ outputs = self.transformer(
716
+ input_ids=input_ids,
717
+ past_key_values=past_key_values,
718
+ attention_mask=attention_mask,
719
+ token_type_ids=token_type_ids,
720
+ position_ids=position_ids,
721
+ head_mask=head_mask,
722
+ inputs_embeds=inputs_embeds,
723
+ use_cache=use_cache,
724
+ output_attentions=output_attentions,
725
+ output_hidden_states=output_hidden_states,
726
+ return_dict=return_dict,
727
+ training=training,
728
+ )
729
+
730
+ return outputs
731
+
732
+ def build(self, input_shape=None):
733
+ if self.built:
734
+ return
735
+ self.built = True
736
+ if getattr(self, "transformer", None) is not None:
737
+ with tf.name_scope(self.transformer.name):
738
+ self.transformer.build(None)
739
+
740
+
741
+ @add_start_docstrings(
742
+ """
743
+ The GPT-J Model transformer with a language modeling head on top.
744
+ """,
745
+ GPTJ_START_DOCSTRING,
746
+ )
747
+ class TFGPTJForCausalLM(TFGPTJPreTrainedModel, TFCausalLanguageModelingLoss):
748
+ def __init__(self, config, *inputs, **kwargs):
749
+ super().__init__(config, *inputs, **kwargs)
750
+ self.transformer = TFGPTJMainLayer(config, name="transformer")
751
+ self.lm_head = keras.layers.Dense(
752
+ config.vocab_size, kernel_initializer=get_initializer(config.initializer_range), name="lm_head"
753
+ )
754
+ self.config = config
755
+
756
+ def get_output_embeddings(self):
757
+ return self.lm_head
758
+
759
+ def set_output_embeddings(self, new_embeddings):
760
+ self.lm_head = new_embeddings
761
+
762
+ def prepare_inputs_for_generation(self, inputs, past_key_values=None, use_cache=None, **kwargs):
763
+ token_type_ids = kwargs.get("token_type_ids", None)
764
+ # only keep the last token of input_ids if past is defined in kwargs
765
+ if past_key_values:
766
+ inputs = tf.expand_dims(inputs[:, -1], -1)
767
+ if token_type_ids is not None:
768
+ token_type_ids = tf.expand_dims(token_type_ids[:, -1], -1)
769
+
770
+ position_ids = kwargs.get("position_ids", None)
771
+ attention_mask = kwargs.get("attention_mask", None)
772
+
773
+ if attention_mask is not None and position_ids is None:
774
+ position_ids = tf.math.cumsum(attention_mask, axis=-1, exclusive=True)
775
+ if past_key_values:
776
+ position_ids = tf.expand_dims(position_ids[:, -1], -1)
777
+
778
+ return {
779
+ "input_ids": inputs,
780
+ "attention_mask": attention_mask,
781
+ "position_ids": position_ids,
782
+ "past_key_values": past_key_values,
783
+ "use_cache": use_cache,
784
+ "token_type_ids": token_type_ids,
785
+ }
786
+
787
+ @unpack_inputs
788
+ @add_start_docstrings_to_model_forward(GPTJ_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
789
+ @add_code_sample_docstrings(
790
+ checkpoint=_CHECKPOINT_FOR_DOC,
791
+ output_type=TFCausalLMOutputWithPast,
792
+ config_class=_CONFIG_FOR_DOC,
793
+ )
794
+ def call(
795
+ self,
796
+ input_ids: TFModelInputType | None = None,
797
+ past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
798
+ attention_mask: np.ndarray | tf.Tensor | None = None,
799
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
800
+ position_ids: np.ndarray | tf.Tensor | None = None,
801
+ head_mask: np.ndarray | tf.Tensor | None = None,
802
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
803
+ labels: np.ndarray | tf.Tensor | None = None,
804
+ use_cache: Optional[bool] = None,
805
+ output_attentions: Optional[bool] = None,
806
+ output_hidden_states: Optional[bool] = None,
807
+ return_dict: Optional[bool] = None,
808
+ training: Optional[bool] = False,
809
+ ) -> Union[TFCausalLMOutputWithPast, Tuple[tf.Tensor]]:
810
+ r"""
811
+ labels (`np.ndarray` or `tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
812
+ Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
813
+ `labels = input_ids` Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100`
814
+ are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]`
815
+ """
816
+
817
+ transformer_outputs = self.transformer(
818
+ input_ids=input_ids,
819
+ past_key_values=past_key_values,
820
+ attention_mask=attention_mask,
821
+ token_type_ids=token_type_ids,
822
+ position_ids=position_ids,
823
+ head_mask=head_mask,
824
+ inputs_embeds=inputs_embeds,
825
+ use_cache=use_cache,
826
+ output_attentions=output_attentions,
827
+ output_hidden_states=output_hidden_states,
828
+ return_dict=return_dict,
829
+ training=training,
830
+ )
831
+ hidden_states = transformer_outputs[0]
832
+ lm_logits = self.lm_head(hidden_states)
833
+
834
+ loss = None
835
+ if labels is not None:
836
+ # shift labels to the left and cut last logit token
837
+ shifted_logits = lm_logits[:, :-1]
838
+ labels = labels[:, 1:]
839
+ loss = self.hf_compute_loss(labels, shifted_logits)
840
+
841
+ if not return_dict:
842
+ output = (lm_logits,) + transformer_outputs[1:]
843
+ return ((loss,) + output) if loss is not None else output
844
+
845
+ return TFCausalLMOutputWithPast(
846
+ loss=loss,
847
+ logits=lm_logits,
848
+ past_key_values=transformer_outputs.past_key_values,
849
+ hidden_states=transformer_outputs.hidden_states,
850
+ attentions=transformer_outputs.attentions,
851
+ )
852
+
853
+ def build(self, input_shape=None):
854
+ if self.built:
855
+ return
856
+ self.built = True
857
+ if getattr(self, "transformer", None) is not None:
858
+ with tf.name_scope(self.transformer.name):
859
+ self.transformer.build(None)
860
+ if getattr(self, "lm_head", None) is not None:
861
+ with tf.name_scope(self.lm_head.name):
862
+ self.lm_head.build([None, None, self.config.n_embd])
863
+
864
+
865
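A minimal sketch of the label/logit alignment behind the causal LM loss above: position t's logits are scored against token t+1. The batch is made up; 50400 is GPT-J's vocabulary size.

import tensorflow as tf

labels = tf.constant([[11, 12, 13, 14]])     # made-up token ids, (batch, seq)
lm_logits = tf.random.normal((1, 4, 50400))  # (batch, seq, vocab)
shifted_logits = lm_logits[:, :-1]           # drop the last position's prediction
shifted_labels = labels[:, 1:]               # drop the first token, which nothing predicts
print(shifted_logits.shape, shifted_labels.shape)  # (1, 3, 50400) (1, 3)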
+ @add_start_docstrings(
866
+ """
867
+ The GPT-J Model transformer with a sequence classification head on top (linear layer).
868
+
869
+ [`GPTJForSequenceClassification`] uses the last token in order to do the classification, as other causal models
870
+ (e.g. GPT, GPT-2, GPT-Neo) do.
871
+
872
+ Since it does classification on the last token, it needs to know the position of the last token. If a
873
+ `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
874
+ no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
875
+ padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
876
+ each row of the batch).
877
+ """,
878
+ GPTJ_START_DOCSTRING,
879
+ )
880
+ class TFGPTJForSequenceClassification(TFGPTJPreTrainedModel, TFSequenceClassificationLoss):
881
+ _keys_to_ignore_on_load_missing = [r"h.\d+.attn.masked_bias", r"h.\d+.attn.bias", r"lm_head.weight"]
882
+
883
+ def __init__(self, config, *inputs, **kwargs):
884
+ super().__init__(config, *inputs, **kwargs)
885
+ self.num_labels = config.num_labels
886
+ self.transformer = TFGPTJMainLayer(config, name="transformer")
887
+ self.score = keras.layers.Dense(
888
+ self.num_labels,
889
+ use_bias=False,
890
+ kernel_initializer=get_initializer(config.initializer_range),
891
+ name="score",
892
+ )
893
+ self.config = config
894
+
895
+ @unpack_inputs
896
+ @add_start_docstrings_to_model_forward(GPTJ_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
897
+ @add_code_sample_docstrings(
898
+ checkpoint=_CHECKPOINT_FOR_DOC,
899
+ output_type=TFSequenceClassifierOutputWithPast,
900
+ config_class=_CONFIG_FOR_DOC,
901
+ )
902
+ def call(
903
+ self,
904
+ input_ids: TFModelInputType | None = None,
905
+ past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
906
+ attention_mask: np.ndarray | tf.Tensor | None = None,
907
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
908
+ position_ids: np.ndarray | tf.Tensor | None = None,
909
+ head_mask: np.ndarray | tf.Tensor | None = None,
910
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
911
+ labels: np.ndarray | tf.Tensor | None = None,
912
+ use_cache: Optional[bool] = None,
913
+ output_attentions: Optional[bool] = None,
914
+ output_hidden_states: Optional[bool] = None,
915
+ return_dict: Optional[bool] = None,
916
+ training: Optional[bool] = False,
917
+ ) -> Union[TFSequenceClassifierOutputWithPast, Tuple[tf.Tensor]]:
918
+ r"""
919
+ labels (`np.ndarray` or `tf.Tensor` of shape `(batch_size,)`, *optional*):
920
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
921
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
922
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
923
+ """
924
+
925
+ transformer_outputs = self.transformer(
926
+ input_ids=input_ids,
927
+ past_key_values=past_key_values,
928
+ attention_mask=attention_mask,
929
+ token_type_ids=token_type_ids,
930
+ position_ids=position_ids,
931
+ head_mask=head_mask,
932
+ inputs_embeds=inputs_embeds,
933
+ use_cache=use_cache,
934
+ output_attentions=output_attentions,
935
+ output_hidden_states=output_hidden_states,
936
+ return_dict=return_dict,
937
+ training=training,
938
+ )
939
+ hidden_states = transformer_outputs[0]
940
+ logits = self.score(hidden_states)
941
+ logits_shape = shape_list(logits)
942
+ in_logits = None
943
+ if self.config.pad_token_id is None:
944
+ sequence_lengths = -1
945
+ else:
946
+ if input_ids is not None:
947
+ sequence_lengths = (
948
+ tf.argmax(tf.cast(tf.math.equal(input_ids, self.config.pad_token_id), input_ids.dtype), axis=-1)
949
+ - 1
950
+ )
951
+ sequence_lengths = tf.where(
952
+ sequence_lengths >= 0,
953
+ sequence_lengths,
954
+ tf.cast(shape_list(input_ids[-1]), sequence_lengths.dtype) - 1,
955
+ )
956
+ in_logits = tf.gather(logits, sequence_lengths, batch_dims=1, axis=1)
957
+ else:
958
+ sequence_lengths = -1
959
+ logger.warning(
960
+ f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be "
961
+ "unexpected if using padding tokens in conjunction with `inputs_embeds.`"
962
+ )
963
+ loss = None
964
+
965
+ if labels is not None:
966
+ if self.config.pad_token_id is None and logits_shape[0] != 1:
967
+ raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
968
+
969
+ if not tf.is_tensor(sequence_lengths):
970
+ in_logits = logits[0 : logits_shape[0], sequence_lengths]
971
+
972
+ loss = self.hf_compute_loss(tf.reshape(labels, [-1]), tf.reshape(in_logits, [-1, self.num_labels]))
973
+ pooled_logits = in_logits if in_logits is not None else logits
974
+
975
+ if not return_dict:
976
+ output = (pooled_logits,) + transformer_outputs[1:]
977
+ return ((loss,) + output) if loss is not None else output
978
+
979
+ return TFSequenceClassifierOutputWithPast(
980
+ loss=loss,
981
+ logits=pooled_logits,
982
+ past_key_values=transformer_outputs.past_key_values,
983
+ hidden_states=transformer_outputs.hidden_states,
984
+ attentions=transformer_outputs.attentions,
985
+ )
986
+
987
+ def build(self, input_shape=None):
988
+ if self.built:
989
+ return
990
+ self.built = True
991
+ if getattr(self, "transformer", None) is not None:
992
+ with tf.name_scope(self.transformer.name):
993
+ self.transformer.build(None)
994
+ if getattr(self, "score", None) is not None:
995
+ with tf.name_scope(self.score.name):
996
+ self.score.build([None, None, self.config.n_embd])
997
+
998
+
999
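A minimal sketch of the TF variant of the last-non-padding-token lookup used by the sequence-classification head above, with a made-up `pad_token_id` and batch; rows without padding fall back to the last position.

import tensorflow as tf

pad_token_id = 0  # made-up; in practice this comes from the model config
input_ids = tf.constant([[5, 6, 7, 0, 0],
                         [8, 9, 1, 2, 3]])
# Index of the first pad token minus one; -1 (no pad found) is replaced by the last index.
sequence_lengths = tf.argmax(tf.cast(tf.math.equal(input_ids, pad_token_id), input_ids.dtype), axis=-1) - 1
sequence_lengths = tf.where(
    sequence_lengths >= 0,
    sequence_lengths,
    tf.cast(input_ids.shape[-1] - 1, sequence_lengths.dtype),
)
print(sequence_lengths.numpy())  # [2 4]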
+ @add_start_docstrings(
1000
+ """
1001
+ The GPT-J Model transformer with a span classification head on top for extractive question-answering tasks like
1002
+ SQuAD (a linear layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
1003
+ """,
1004
+ GPTJ_START_DOCSTRING,
1005
+ )
1006
+ class TFGPTJForQuestionAnswering(TFGPTJPreTrainedModel, TFQuestionAnsweringLoss):
1007
+ _keys_to_ignore_on_load_missing = [r"h.\d+.attn.masked_bias", r"h.\d+.attn.bias", r"lm_head.weight"]
1008
+
1009
+ def __init__(self, config, *inputs, **kwargs):
1010
+ super().__init__(config, *inputs, **kwargs)
1011
+ self.num_labels = config.num_labels
1012
+ self.transformer = TFGPTJMainLayer(config, name="transformer")
1013
+ self.qa_outputs = keras.layers.Dense(
1014
+ self.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="qa_outputs"
1015
+ )
1016
+ self.config = config
1017
+
1018
+ @unpack_inputs
1019
+ @add_start_docstrings_to_model_forward(GPTJ_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1020
+ @add_code_sample_docstrings(
1021
+ checkpoint=_CHECKPOINT_FOR_DOC,
1022
+ output_type=TFQuestionAnsweringModelOutput,
1023
+ config_class=_CONFIG_FOR_DOC,
1024
+ )
1025
+ def call(
1026
+ self,
1027
+ input_ids: TFModelInputType | None = None,
1028
+ past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
1029
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1030
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1031
+ position_ids: np.ndarray | tf.Tensor | None = None,
1032
+ head_mask: np.ndarray | tf.Tensor | None = None,
1033
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1034
+ start_positions: np.ndarray | tf.Tensor | None = None,
1035
+ end_positions: np.ndarray | tf.Tensor | None = None,
1036
+ output_attentions: Optional[bool] = None,
1037
+ output_hidden_states: Optional[bool] = None,
1038
+ return_dict: Optional[bool] = None,
1039
+ training: Optional[bool] = False,
1040
+ ) -> Union[TFQuestionAnsweringModelOutput, Tuple[tf.Tensor]]:
1041
+ r"""
1042
+ start_positions (`np.ndarray` or `tf.Tensor` of shape `(batch_size,)`, *optional*):
1043
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
1044
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1045
+ are not taken into account for computing the loss.
1046
+ end_positions (`np.ndarray` or `tf.Tensor` of shape `(batch_size,)`, *optional*):
1047
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
1048
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1049
+ are not taken into account for computing the loss.
1050
+ """
1051
+
1052
+ transformer_outputs = self.transformer(
1053
+ input_ids=input_ids,
1054
+ past_key_values=past_key_values,
1055
+ attention_mask=attention_mask,
1056
+ token_type_ids=token_type_ids,
1057
+ position_ids=position_ids,
1058
+ head_mask=head_mask,
1059
+ inputs_embeds=inputs_embeds,
1060
+ output_attentions=output_attentions,
1061
+ output_hidden_states=output_hidden_states,
1062
+ return_dict=return_dict,
1063
+ training=training,
1064
+ )
1065
+ sequence_output = transformer_outputs[0]
1066
+
1067
+ logits = self.qa_outputs(sequence_output)
1068
+ start_logits, end_logits = tf.split(logits, 2, axis=-1)
1069
+ start_logits = tf.squeeze(start_logits, axis=-1)
1070
+ end_logits = tf.squeeze(end_logits, axis=-1)
1071
+
1072
+ loss = None
1073
+ if start_positions is not None and end_positions is not None:
1074
+ labels = {"start_position": start_positions}
1075
+ labels["end_position"] = end_positions
1076
+ loss = self.hf_compute_loss(labels, (start_logits, end_logits))
1077
+
1078
+ if not return_dict:
1079
+ output = (start_logits, end_logits) + transformer_outputs[2:]
1080
+ return ((loss,) + output) if loss is not None else output
1081
+
1082
+ return TFQuestionAnsweringModelOutput(
1083
+ loss=loss,
1084
+ start_logits=start_logits,
1085
+ end_logits=end_logits,
1086
+ hidden_states=transformer_outputs.hidden_states,
1087
+ attentions=transformer_outputs.attentions,
1088
+ )
1089
+
1090
+ def build(self, input_shape=None):
1091
+ if self.built:
1092
+ return
1093
+ self.built = True
1094
+ if getattr(self, "transformer", None) is not None:
1095
+ with tf.name_scope(self.transformer.name):
1096
+ self.transformer.build(None)
1097
+ if getattr(self, "qa_outputs", None) is not None:
1098
+ with tf.name_scope(self.qa_outputs.name):
1099
+ self.qa_outputs.build([None, None, self.config.hidden_size])
venv/lib/python3.10/site-packages/transformers/models/gptsan_japanese/__init__.py ADDED
@@ -0,0 +1,70 @@
1
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import TYPE_CHECKING
16
+
17
+ from ...utils import (
18
+ OptionalDependencyNotAvailable,
19
+ _LazyModule,
20
+ is_flax_available,
21
+ is_tf_available,
22
+ is_torch_available,
23
+ )
24
+
25
+
26
+ _import_structure = {
27
+ "configuration_gptsan_japanese": ["GPTSAN_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTSanJapaneseConfig"],
28
+ "tokenization_gptsan_japanese": ["GPTSanJapaneseTokenizer"],
29
+ }
30
+
31
+ try:
32
+ if not is_torch_available():
33
+ raise OptionalDependencyNotAvailable()
34
+ except OptionalDependencyNotAvailable:
35
+ pass
36
+ else:
37
+ _import_structure["modeling_gptsan_japanese"] = [
38
+ "GPTSAN_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST",
39
+ "GPTSanJapaneseForConditionalGeneration",
40
+ "GPTSanJapaneseModel",
41
+ "GPTSanJapanesePreTrainedModel",
42
+ ]
43
+ _import_structure["tokenization_gptsan_japanese"] = [
44
+ "GPTSanJapaneseTokenizer",
45
+ ]
46
+
47
+
48
+ if TYPE_CHECKING:
49
+ from .configuration_gptsan_japanese import GPTSAN_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTSanJapaneseConfig
50
+ from .tokenization_gptsan_japanese import GPTSanJapaneseTokenizer
51
+
52
+ try:
53
+ if not is_torch_available():
54
+ raise OptionalDependencyNotAvailable()
55
+ except OptionalDependencyNotAvailable:
56
+ pass
57
+ else:
58
+ from .modeling_gptsan_japanese import (
59
+ GPTSAN_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
60
+ GPTSanJapaneseForConditionalGeneration,
61
+ GPTSanJapaneseModel,
62
+ GPTSanJapanesePreTrainedModel,
63
+ )
64
+ from .tokenization_gptsan_japanese import GPTSanJapaneseTokenizer
65
+
66
+
67
+ else:
68
+ import sys
69
+
70
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
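Because the package module is replaced by a `_LazyModule`, the heavy submodules are only imported when one of their symbols is first resolved. A usage sketch, assuming a transformers build that exposes the GPTSAN-japanese symbols at the top level and has torch installed:

from transformers import GPTSanJapaneseConfig, GPTSanJapaneseTokenizer  # lightweight, no torch import yet
from transformers import GPTSanJapaneseModel  # resolving this name pulls in the torch-backed modeling module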
venv/lib/python3.10/site-packages/transformers/models/gptsan_japanese/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.18 kB).
 
venv/lib/python3.10/site-packages/transformers/models/gptsan_japanese/__pycache__/configuration_gptsan_japanese.cpython-310.pyc ADDED
Binary file (6.17 kB).
 
venv/lib/python3.10/site-packages/transformers/models/gptsan_japanese/__pycache__/convert_gptsan_tf_checkpoint_to_pytorch.cpython-310.pyc ADDED
Binary file (4.84 kB).
 
venv/lib/python3.10/site-packages/transformers/models/gptsan_japanese/__pycache__/modeling_gptsan_japanese.cpython-310.pyc ADDED
Binary file (45.4 kB).
 
venv/lib/python3.10/site-packages/transformers/models/gptsan_japanese/__pycache__/tokenization_gptsan_japanese.cpython-310.pyc ADDED
Binary file (20.1 kB).
 
venv/lib/python3.10/site-packages/transformers/models/gptsan_japanese/configuration_gptsan_japanese.py ADDED
@@ -0,0 +1,156 @@
1
+ # coding=utf-8
2
+ # Copyright 2023, HuggingFace Inc.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ GPTSAN-japanese model configuration"""
16
+ from ...configuration_utils import PretrainedConfig
17
+ from ...utils import logging
18
+
19
+
20
+ logger = logging.get_logger(__name__)
21
+
22
+
23
+ from ..deprecated._archive_maps import GPTSAN_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
24
+
25
+
26
+ class GPTSanJapaneseConfig(PretrainedConfig):
27
+ r"""
28
+ This is the configuration class to store the configuration of a [`GPTSanJapaneseModel`]. It is used to instantiate
29
+ a GPTSANJapanese model according to the specified arguments, defining the model architecture. Instantiating a
30
+ configuration with the defaults will yield a similar configuration to that of the GPTSANJapanese
31
+ [Tanrei/GPTSAN-japanese](https://huggingface.co/Tanrei/GPTSAN-japanese) architecture.
32
+
33
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
34
+ documentation from [`PretrainedConfig`] for more information.
35
+
36
+ Arguments:
37
+ vocab_size (`int`, *optional*, defaults to 36000):
38
+ Vocabulary size of the GPTSANJapanese model. Defines the number of different tokens that can be represented
39
+ by the `input_ids` passed when calling [`GPTSanJapaneseModel`].
40
+ max_position_embeddings (`int`, *optional*, defaults to 1280):
41
+ The maximum sequence length that this model might ever be used with. Defaults set this to 1280.
42
+ d_model (`int`, *optional*, defaults to 1024):
43
+ Size of the encoder layers and the pooler layer.
44
+ d_ff (`int`, *optional*, defaults to 8192):
45
+ Size of the intermediate feed forward layer in each `SwitchTransformersBlock`.
46
+ d_ext (`int`, *optional*, defaults to 4096):
47
+ Size of the intermediate feed forward layer in each of the Extra-layers.
48
+ d_spout (`int`, *optional*, defaults to 128):
49
+ Size of the `spout` vector.
50
+ num_switch_layers (`int`, *optional*, defaults to 10):
51
+ Number of layers in the Switch Transformer layer.
52
+ num_ext_layers (`int`, *optional*, defaults to 0):
53
+ Number of layers in the Extra-layers.
54
+ num_heads (`int`, *optional*, defaults to 16):
55
+ Number of attention heads for each attention layer in the Transformer encoder.
56
+ num_experts (`int`, *optional*, defaults to 16):
57
+ Number of experts for each SwitchTransformer layer.
58
+ expert_capacity (`int`, *optional*, defaults to 128):
59
+ Number of tokens that can be stored in each expert. If set to 1, the model will behave like a regular
60
+ Transformer.
61
+ dropout_rate (`float`, *optional*, defaults to 0.0):
62
+ The ratio for all dropout layers.
63
+ layer_norm_epsilon (`float`, *optional*, defaults to 1e-5):
64
+ The epsilon used by the layer normalization layers.
65
+ router_bias (`bool`, *optional*, defaults to `False`):
66
+ Whether to add a bias to the router.
67
+ router_jitter_noise (`float`, *optional*, defaults to 0.0):
68
+ Amount of noise to add to the router. Set it to 0.0 during prediction or set small value (usually 1e-2)
69
+ during training.
70
+ router_dtype (`str`, *optional*, defaults to `"float32"`):
71
+ The `dtype` used for the routers. It is preferable to keep the `dtype` to `"float32"` as specified in the
72
+ *selective precision* discussion in [the paper](https://arxiv.org/abs/2101.03961).
73
+ router_ignore_padding_tokens (`bool`, *optional*, defaults to `False`):
74
+ Whether to ignore padding tokens when routing.
75
+ output_hidden_states (`bool`, *optional*, defaults to `False`):
76
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
77
+ more detail.
78
+ output_attentions (`bool`, *optional*, defaults to `False`):
79
+ Whether or not to return the attentions tensors of all attention layers.
80
+ initializer_factor (`float`, *optional*, defaults to 0.002):
81
+ A factor for initializing all weight matrices.
82
+ output_router_logits (`bool`, *optional*, defaults to `False`):
83
+ Whether or not to return the router logits of all experts.
84
+ use_cache (`bool`, *optional*, defaults to `True`):
85
+ Whether or not the model should return the last key/values attentions (not used by all models).
86
+ """
87
+
88
+ model_type = "gptsan-japanese"
89
+ keys_to_ignore_at_inference = [
90
+ "past_key_values",
91
+ ]
92
+ attribute_map = {
93
+ "hidden_size": "d_model",
94
+ "num_attention_heads": "num_heads",
95
+ "num_hidden_layers": "num_layers",
96
+ }
97
+
98
+ def __init__(
99
+ self,
100
+ vocab_size=36000,
101
+ max_position_embeddings=1280,
102
+ d_model=1024,
103
+ d_ff=8192,
104
+ d_ext=4096,
105
+ d_spout=128,
106
+ num_switch_layers=10,
107
+ num_ext_layers=0,
108
+ num_heads=16,
109
+ num_experts=16,
110
+ expert_capacity=128,
111
+ dropout_rate=0.0,
112
+ layer_norm_epsilon=1e-5,
113
+ router_bias=False,
114
+ router_jitter_noise=0.0,
115
+ router_dtype="float32",
116
+ router_ignore_padding_tokens=False,
117
+ output_hidden_states=False,
118
+ output_attentions=False,
119
+ initializer_factor=0.002,
120
+ output_router_logits=False,
121
+ use_cache=True,
122
+ separator_token_id=35998,
123
+ pad_token_id=35995,
124
+ eos_token_id=35999,
125
+ **kwargs,
126
+ ):
127
+ self.vocab_size = vocab_size
128
+ self.max_position_embeddings = max_position_embeddings
129
+ self.d_model = d_model
130
+ self.d_ff = d_ff
131
+ self.d_ext = d_ext
132
+ self.d_spout = d_spout
133
+ self.num_switch_layers = num_switch_layers
134
+ self.num_ext_layers = num_ext_layers
135
+ self.num_layers = num_switch_layers + num_ext_layers
136
+ self.num_heads = num_heads
137
+ self.num_experts = num_experts
138
+ self.expert_capacity = expert_capacity
139
+ self.dropout_rate = dropout_rate
140
+ self.layer_norm_epsilon = layer_norm_epsilon
141
+ self.router_bias = router_bias
142
+ self.router_jitter_noise = router_jitter_noise
143
+ self.router_dtype = router_dtype
144
+ self.router_ignore_padding_tokens = router_ignore_padding_tokens
145
+ self.output_hidden_states = output_hidden_states
146
+ self.output_attentions = output_attentions
147
+ self.initializer_factor = initializer_factor
148
+ self.output_router_logits = output_router_logits
149
+ self.use_cache = use_cache
150
+
151
+ super().__init__(
152
+ separator_token_id=separator_token_id,
153
+ pad_token_id=pad_token_id,
154
+ eos_token_id=eos_token_id,
155
+ **kwargs,
156
+ )
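A short sketch of how the configuration above behaves once instantiated; note that `num_layers` is derived from the two layer counts and that `attribute_map` aliases the generic names onto the GPTSAN-specific ones (the values below follow the defaults shown above):

from transformers import GPTSanJapaneseConfig

config = GPTSanJapaneseConfig(num_switch_layers=10, num_ext_layers=2)
print(config.num_layers)           # 12   -> num_switch_layers + num_ext_layers
print(config.hidden_size)          # 1024 -> aliased to d_model via attribute_map
print(config.num_attention_heads)  # 16   -> aliased to num_heads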
venv/lib/python3.10/site-packages/transformers/models/gptsan_japanese/convert_gptsan_tf_checkpoint_to_pytorch.py ADDED
@@ -0,0 +1,181 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ """Convert GPTSANJapanese checkpoints from the original repository to pytorch model."""
17
+
18
+ import argparse
19
+ import json
20
+ import os
21
+ from collections import OrderedDict
22
+
23
+ import numpy as np
24
+ import tensorflow as tf
25
+ import torch
26
+
27
+
28
+ def convert_tf_gptsan_to_pt(args):
29
+ parameter_file = os.path.join(args.tf_model_dir, "parameters.json")
30
+ params = json.loads(open(parameter_file).read())
31
+ if not params:
32
+ raise ValueError(
33
+ f"It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file."
34
+ )
35
+ if not args.output.endswith(".pt"):
36
+ args.output = args.output + ".pt"
37
+ new_state = OrderedDict()
38
+ with tf.device("/CPU:0"):
39
+ reader = tf.train.load_checkpoint(args.tf_model_dir)
40
+ shapes = reader.get_variable_to_shape_map()
41
+ for key_name in shapes.keys():
42
+ vnp = reader.get_tensor(key_name).astype(np.float16)
43
+ if key_name.endswith("/adam_m") or key_name.endswith("/adam_v"):
44
+ continue
45
+ if key_name.startswith("pasts/"):
46
+ if key_name.startswith("pasts/mlp"):
47
+ player = int(key_name[9])
48
+ elif key_name.startswith("pasts/out"):
49
+ player = 8
50
+ name = "model.sqout.%d.weight" % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time
51
+ state = vnp.transpose([1, 0]).copy() # Mesh-Tensorflow is a diagonal matrix
52
+ new_state[name] = torch.tensor(state)
53
+ elif key_name.startswith("model/moe"):
54
+ player = int(key_name[9:].split("/")[0])
55
+ if key_name.endswith("/switch_gating/kernel"):
56
+ name = "model.blocks.%d.feed_forward.mlp.router.classifier.weight" % player
57
+ state = vnp.transpose([1, 0]).copy() # Mesh-Tensorflow is a diagonal matrix
58
+ new_state[name] = torch.tensor(state)
59
+ elif key_name.endswith("/softmlp/kernel"):
60
+ name = "model.blocks.%d.feed_forward.soft_bypass_mlp.weight" % player
61
+ state = vnp.transpose([1, 0]).copy() # Mesh-Tensorflow is a diagonal matrix
62
+ new_state[name] = torch.tensor(state)
63
+ elif key_name.endswith("/wo/kernel") or key_name.endswith("/wi/kernel"):
64
+ nlayer = key_name[-9:-7]
65
+ for i in range(16):
66
+ name = "model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight" % (player, i, nlayer)
67
+ state = (
68
+ vnp[i].transpose([1, 0]).copy()
69
+ ) # In Mesh-Tensorflow, it is one array, so it is divided
70
+ new_state[name] = torch.tensor(state)
71
+ elif key_name.startswith("model/mlp"):
72
+ player = int(key_name[9:].split("/")[0])
73
+ if key_name.endswith("/p1/kernel"):
74
+ name = "model.blocks.%d.feed_forward.mlp.wi.weight" % player
75
+ state = vnp.transpose([1, 0]).copy() # Mesh-Tensorflow is a diagonal matrix
76
+ new_state[name] = torch.tensor(state)
77
+ elif key_name.endswith("/p1/bias"):
78
+ name = "model.blocks.%d.feed_forward.mlp.wi.bias" % player
79
+ state = vnp.copy() # same because it is one dimensional
80
+ new_state[name] = torch.tensor(state)
81
+ elif key_name.endswith("/p2/kernel"):
82
+ name = "model.blocks.%d.feed_forward.mlp.wo.weight" % player
83
+ state = vnp.transpose([1, 0]).copy() # Mesh-Tensorflow is a diagonal matrix
84
+ new_state[name] = torch.tensor(state)
85
+ elif key_name.endswith("/p2/bias"):
86
+ name = "model.blocks.%d.feed_forward.mlp.wo.bias" % player
87
+ state = vnp.copy() # same because it is one dimensional
88
+ new_state[name] = torch.tensor(state)
89
+ elif key_name.startswith("model/ln"):
90
+ player = int(key_name[8:].split("/")[0])
91
+ if key_name.endswith("/b"):
92
+ name = "model.blocks.%d.feed_forward.norm.bias" % player
93
+ state = vnp.copy() # same because it is one dimensional
94
+ new_state[name] = torch.tensor(state)
95
+ elif key_name.endswith("/g"):
96
+ name = "model.blocks.%d.feed_forward.norm.weight" % player
97
+ state = vnp.copy() # same because it is one dimensional
98
+ new_state[name] = torch.tensor(state)
99
+ elif key_name.startswith("model/att"):
100
+ player = int(key_name[9:].split("/")[0])
101
+ if key_name.endswith("/qkv/kernel"):
102
+ state = vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum
103
+ state_q = state[:, 0, :, :]
104
+ state_k = state[:, 1, :, :]
105
+ state_v = state[:, 2, :, :]
106
+ state_q = (
107
+ state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]])
108
+ .transpose([1, 0])
109
+ .copy()
110
+ ) # Mesh-Tensorflow is a diagonal matrix
111
+ state_k = (
112
+ state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]])
113
+ .transpose([1, 0])
114
+ .copy()
115
+ ) # Mesh-Tensorflow is a diagonal matrix
116
+ state_v = (
117
+ state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]])
118
+ .transpose([1, 0])
119
+ .copy()
120
+ ) # Mesh-Tensorflow is a diagonal matrix
121
+ name = "model.blocks.%d.self_attn.self_attn.q_proj.weight" % player
122
+ new_state[name] = torch.tensor(state_q)
123
+ name = "model.blocks.%d.self_attn.self_attn.k_proj.weight" % player
124
+ new_state[name] = torch.tensor(state_k)
125
+ name = "model.blocks.%d.self_attn.self_attn.v_proj.weight" % player
126
+ new_state[name] = torch.tensor(state_v)
127
+ elif key_name.endswith("/o/kernel"):
128
+ name = "model.blocks.%d.self_attn.self_attn.out_proj.weight" % player
129
+ state = (
130
+ vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]]).transpose([1, 0]).copy()
131
+ ) # Mesh-Tensorflow is a diagonal matrix
132
+ new_state[name] = torch.tensor(state)
133
+ elif key_name.startswith("model/an"):
134
+ player = int(key_name[8:].split("/")[0])
135
+ if key_name.endswith("/b"):
136
+ name = "model.blocks.%d.self_attn.norm.bias" % player
137
+ state = vnp.copy() # same because it is one dimensional
138
+ new_state[name] = torch.tensor(state)
139
+ elif key_name.endswith("/g"):
140
+ name = "model.blocks.%d.self_attn.norm.weight" % player
141
+ state = vnp.copy() # same because it is one dimensional
142
+ new_state[name] = torch.tensor(state)
143
+ elif (
144
+ key_name.startswith("model/wte")
145
+ or key_name.startswith("model/wpe")
146
+ or key_name.startswith("model/ete")
147
+ ):
148
+ nlayer = {"wte": "embed_tokens", "wpe": "position_embeddings", "ete": "extra_position_embeddings"}[
149
+ key_name[-3:]
150
+ ]
151
+ name = "model.%s.weight" % nlayer
152
+ state = vnp.copy() # same in embedded
153
+ new_state[name] = torch.tensor(state)
154
+ if key_name.startswith("model/wte"):
155
+ name = "lm_head.weight"
156
+ state = vnp.copy() # same in embedded
157
+ new_state[name] = torch.tensor(state)
158
+ elif key_name.startswith("model/wob"):
159
+ name = "final_logits_bias"
160
+ state = vnp.copy() # same in embedded
161
+ state = state.reshape((1, -1))
162
+ new_state[name] = torch.tensor(state)
163
+ elif key_name == "model/dense/kernel":
164
+ name = "model.last_project.weight"
165
+ state = vnp.transpose([1, 0]).copy() # Mesh-Tensorflow is a diagonal matrix
166
+ new_state[name] = torch.tensor(state)
167
+ elif key_name == "model/dense_1/bias":
168
+ name = "model.last_project.bias"
169
+ state = vnp.copy() # same because it is one dimensional
170
+ new_state[name] = torch.tensor(state)
171
+ torch.save(new_state, args.output)
172
+
173
+
174
+ if __name__ == "__main__":
175
+ parser = argparse.ArgumentParser(
176
+ description="model converter.", formatter_class=argparse.ArgumentDefaultsHelpFormatter
177
+ )
178
+ parser.add_argument("--tf_model_dir", metavar="PATH", type=str, required=True, help="import model")
179
+ parser.add_argument("--output", metavar="PATH", type=str, required=True, help="output model")
180
+ args = parser.parse_args()
181
+ convert_tf_gptsan_to_pt(args)
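A hedged usage sketch for the converter above; the paths are placeholders, and the only hard requirement visible in the script is a TensorFlow checkpoint directory containing a `parameters.json` file:

# python convert_gptsan_tf_checkpoint_to_pytorch.py --tf_model_dir /path/to/tf_ckpt --output gptsan_japanese.pt
import torch

state_dict = torch.load("gptsan_japanese.pt", map_location="cpu")  # plain OrderedDict written by torch.save
print(len(state_dict), "tensors converted")
print(sorted(state_dict)[:3])  # converted names such as "model.blocks.0.feed_forward...."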
venv/lib/python3.10/site-packages/transformers/models/gptsan_japanese/modeling_gptsan_japanese.py ADDED
@@ -0,0 +1,1343 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 Toshiyuki Sakamoto(tanreinama) and HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch GPTSANJapanese model."""
16
+
17
+
18
+ import copy
19
+ from typing import List, Optional, Tuple, Union
20
+
21
+ import torch
22
+ import torch.nn as nn
23
+
24
+ from ...activations import ACT2FN
25
+ from ...modeling_outputs import MoECausalLMOutputWithPast, MoEModelOutputWithPastAndCrossAttentions
26
+ from ...modeling_utils import PreTrainedModel
27
+ from ...utils import (
28
+ DUMMY_INPUTS,
29
+ DUMMY_MASK,
30
+ add_start_docstrings,
31
+ add_start_docstrings_to_model_forward,
32
+ is_torch_fx_proxy,
33
+ logging,
34
+ )
35
+ from .configuration_gptsan_japanese import GPTSanJapaneseConfig
36
+
37
+
38
+ logger = logging.get_logger(__name__)
39
+
40
+ _CONFIG_FOR_DOC = "GPTSanJapaneseConfig"
41
+ _CHECKPOINT_FOR_DOC = "Tanrei/GPTSAN-japanese"
42
+
43
+ ####################################################
44
+ # This dict contains ids and associated url
45
+ # for the pretrained weights provided with the models
46
+ ####################################################
47
+
48
+ from ..deprecated._archive_maps import GPTSAN_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
49
+
50
+
51
+ # Copied from transformers.models.switch_transformers.modeling_switch_transformers.router_z_loss_func
52
+ def router_z_loss_func(router_logits: torch.Tensor) -> float:
53
+ r"""
54
+ Compute the router z-loss implemented in PyTorch.
55
+
56
+ The router z-loss was introduced in [Designing Effective Sparse Expert Models](https://arxiv.org/abs/2202.08906).
57
+ It encourages router logits to remain small in an effort to improve stability.
58
+
59
+ Args:
60
+ router_logits (`float`):
61
+ Input logits of shape [batch_size, sequence_length, num_experts]
62
+
63
+ Returns:
64
+ Scalar router z-loss.
65
+ """
66
+ num_groups, tokens_per_group, _ = router_logits.shape
67
+ log_z = torch.logsumexp(router_logits, dim=-1)
68
+ z_loss = log_z**2
69
+ return torch.sum(z_loss) / (num_groups * tokens_per_group)
70
+
71
+
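A toy check of the z-loss above, reproducing the same computation on arbitrary logits (this is a sketch, not part of the model file):

import torch

router_logits = torch.randn(2, 4, 8)             # [num_groups, tokens_per_group, num_experts]
log_z = torch.logsumexp(router_logits, dim=-1)   # [2, 4]
z_loss = (log_z ** 2).sum() / (2 * 4)            # equals router_z_loss_func(router_logits)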
72
+ # Copied from transformers.models.switch_transformers.modeling_switch_transformers.load_balancing_loss_func
73
+ def load_balancing_loss_func(router_probs: torch.Tensor, expert_indices: torch.Tensor) -> float:
74
+ r"""
75
+ Computes auxiliary load balancing loss as in Switch Transformer - implemented in Pytorch.
76
+
77
+ See Switch Transformer (https://arxiv.org/abs/2101.03961) for more details. This function implements the loss
78
+ function presented in equations (4) - (6) of the paper. It aims at penalizing cases where the routing between
79
+ experts is too unbalanced.
80
+
81
+ Args:
82
+ router_probs (`torch.Tensor`):
83
+ Probability assigned to each expert per token. Shape: [batch_size, sequence_length, num_experts].
84
+ expert_indices (`torch.Tensor`):
85
+ Indices tensor of shape [batch_size, sequence_length] identifying the selected expert for a given token.
86
+
87
+ Returns:
88
+ The auxiliary loss.
89
+ """
90
+ num_experts = router_probs.shape[-1]
91
+
92
+ # cast the expert indices to int64, otherwise one-hot encoding will fail
93
+ if expert_indices.dtype != torch.int64:
94
+ expert_indices = expert_indices.to(torch.int64)
95
+
96
+ if len(expert_indices.shape) == 2:
97
+ expert_indices = expert_indices.unsqueeze(2)
98
+
99
+ expert_mask = torch.nn.functional.one_hot(expert_indices, num_experts)
100
+
101
+ # For a given token, determine if it was routed to a given expert.
102
+ expert_mask = torch.max(expert_mask, axis=-2).values
103
+
104
+ # cast to float32 otherwise mean will fail
105
+ expert_mask = expert_mask.to(torch.float32)
106
+ tokens_per_group_and_expert = torch.mean(expert_mask, axis=-2)
107
+
108
+ router_prob_per_group_and_expert = torch.mean(router_probs, axis=-2)
109
+ return torch.mean(tokens_per_group_and_expert * router_prob_per_group_and_expert) * (num_experts**2)
110
+
111
+
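A simplified trace of the balanced case for the auxiliary loss above (it skips the unsqueeze/max handling for 2-D index tensors, which reduces to the same result here): with uniform router probabilities and an equal token count per expert, the loss bottoms out at 1.0.

import torch

num_experts, tokens = 4, 8
expert_indices = torch.tensor([[0, 1, 2, 3, 0, 1, 2, 3]])               # perfectly balanced routing
router_probs = torch.full((1, tokens, num_experts), 1.0 / num_experts)  # uniform router probabilities

expert_mask = torch.nn.functional.one_hot(expert_indices, num_experts).float()
tokens_per_expert = expert_mask.mean(dim=-2)                            # fraction of tokens per expert
probs_per_expert = router_probs.mean(dim=-2)
aux_loss = (tokens_per_expert * probs_per_expert).mean() * num_experts**2
print(aux_loss)  # tensor(1.)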
112
+ class GPTSanJapaneseDenseActDense(nn.Module):
113
+ """
114
+ FFN Layer for Switch Transformer and Extra layers
115
+
116
+ GPTSAN can mix Switch Transformer layers and normal Transformer layers. This class is used as an Expert in Switch
117
+ Transformer layers and as an FFN in regular Transformer layers. ReLU is used in the Switch Transformer layer, and
118
+ Swish is used in the normal Transformer layer, so the `ext_layer` argument selects which activation is used.
119
+
120
+ """
121
+
122
+ def __init__(self, config: GPTSanJapaneseConfig, ext_layer=False):
123
+ super().__init__()
124
+ d_inter = config.d_ext if ext_layer else config.d_ff
125
+ self.wi = nn.Linear(config.d_model, d_inter, bias=ext_layer)
126
+ self.wo = nn.Linear(d_inter, config.d_model, bias=ext_layer)
127
+ self.dropout = nn.Identity() if ext_layer else nn.Dropout(config.dropout_rate)
128
+ self.act = ACT2FN["swish" if ext_layer else "relu"]
129
+
130
+ def forward(self, hidden_states):
131
+ r"""
132
+ Args:
133
+ hidden_states (`torch.Tensor`) :
134
+ [num_groups, tokens_per_group, hidden_dim] inputs to send to experts.
135
+ Returns:
136
+ torch.Tensor[num_groups, tokens_per_group, hidden_dim]
137
+
138
+ """
139
+ hidden_states = self.wi(hidden_states)
140
+ hidden_states = self.act(hidden_states)
141
+ hidden_states = self.dropout(hidden_states)
142
+ hidden_states = self.wo(hidden_states)
143
+ return hidden_states
144
+
145
+
146
+ # Copied from transformers.models.switch_transformers.modeling_switch_transformers.SwitchTransformersTop1Router with SwitchTransformers->GPTSanJapanese
147
+ class GPTSanJapaneseTop1Router(nn.Module):
148
+ """
149
+ Router using tokens choose top-1 experts assignment.
150
+
151
+ This router uses the same mechanism as in Switch Transformer (https://arxiv.org/abs/2101.03961) and V-MoE
152
+ (https://arxiv.org/abs/2106.05974): tokens choose their top experts. Items are sorted by router_probs and then
153
+ routed to their choice of expert until the expert's expert_capacity is reached. **There is no guarantee that each
154
+ token is processed by an expert**, or that each expert receives at least one token.
155
+
156
+ """
157
+
158
+ def __init__(self, config: GPTSanJapaneseConfig):
159
+ super().__init__()
160
+ self.num_experts = config.num_experts
161
+ self.expert_capacity = config.expert_capacity
162
+ self.classifier = nn.Linear(config.hidden_size, self.num_experts, bias=config.router_bias)
163
+ self.jitter_noise = config.router_jitter_noise
164
+ self.ignore_padding_tokens = config.router_ignore_padding_tokens
165
+ self.dtype = getattr(torch, config.router_dtype)
166
+
167
+ def _compute_router_probabilities(self, hidden_states: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
168
+ r"""
169
+ Computes router probabilities from input hidden states.
170
+
171
+ Args:
172
+ hidden_states (`torch.Tensor`):
173
+ (batch_size, sequence_length, hidden_dim) from which router probabilities are computed.
174
+ Returns:
175
+ router_probabilities (`torch.Tensor`):
176
+ Tensor of shape (batch_size, sequence_length, num_experts) corresponding to the probabilities for each
177
+ token and expert. Used for routing tokens to experts.
178
+ router_logits (`torch.Tensor`):
179
+ Logits tensor of shape (batch_size, sequence_length, num_experts) corresponding to raw router logits.
180
+ This is used later for computing router z-loss.
181
+ """
182
+ # float32 is used to ensure stability. See the discussion of "selective precision" in
183
+ # https://arxiv.org/abs/2101.03961.
184
+ # We also store the previous dtype to cast back the output to the previous dtype
185
+ self.input_dtype = hidden_states.dtype
186
+ hidden_states = hidden_states.to(self.dtype)
187
+
188
+ if self.training and self.jitter_noise > 0:
189
+ # Multiply the token inputs by the uniform distribution - adding some noise
190
+ hidden_states *= torch.empty_like(hidden_states).uniform_(1.0 - self.jitter_noise, 1.0 + self.jitter_noise)
191
+
192
+ # Shape: [num_groups, tokens_per_group, num_experts]
193
+ self._cast_classifier()
194
+ router_logits = self.classifier(hidden_states)
195
+
196
+ # Apply Softmax and cast back to the original `dtype`
197
+ router_probabilities = nn.functional.softmax(router_logits, dim=-1, dtype=self.dtype).to(self.input_dtype)
198
+ return router_probabilities, router_logits
199
+
200
+ def _cast_classifier(self):
201
+ r"""
202
+ `bitsandbytes` `Linear8bitLt` layers do not support manual casting. Therefore, we need to check whether they are an
204
+ instance of the `Linear8bitLt` class by checking special attributes.
204
+ """
205
+ if not (hasattr(self.classifier, "SCB") or hasattr(self.classifier, "CB")):
206
+ self.classifier = self.classifier.to(self.dtype)
207
+
208
+ def forward(self, hidden_states: torch.Tensor) -> Tuple:
209
+ r"""
210
+ Generic forward function for every Router class. Each Router expects to have the same input hidden states
211
+ (`hidden_states`) corresponding to the hidden states for each token, and the `expert_capacity` corresponding to the
212
+ number of tokens the Router will send to each expert; some Routers can send only up to a few tokens to each expert.
213
+
214
+ Each Router works as the following: it expects the hidden states for each token, gets the `router_probs` and
215
+ `router_logits` from the `router_weights`. This will assign for each token, the raw probability to be assigned
216
+ to an expert. Then each Router class will have to define its own `_compute_routing_instructions`.
217
+
218
+ Args:
219
+ hidden_states (`torch.Tensor`) :
220
+ [num_groups, tokens_per_group, hidden_dim] inputs to send to experts.
221
+ Returns:
222
+ Tuple[`torch.Tensor`, `torch.Tensor`, `torch.Tensor`] Tuple containing the expert index, the router probs
223
+ and the router logits. The router probabilities and logits are required to compute the loss.
224
+ """
225
+ router_probs, router_logits = self._compute_router_probabilities(hidden_states)
226
+
227
+ expert_index = torch.argmax(router_probs, dim=-1)
228
+ expert_index = torch.nn.functional.one_hot(expert_index, num_classes=self.num_experts)
229
+
230
+ # Mask tokens outside expert capacity. Sum over each sequence
231
+ token_priority = torch.cumsum(expert_index, dim=-2)
232
+ # mask if the token routed to the expert will overflow
233
+ expert_capacity_mask = token_priority <= self.expert_capacity
234
+ expert_index = expert_index * expert_capacity_mask
235
+
236
+ router_probs = torch.max(router_probs, dim=-1).values.unsqueeze(-1)
237
+ return expert_index, router_probs, router_logits
238
+
239
+
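A toy illustration of the capacity masking in the forward above: with 5 tokens, 2 experts and an expert capacity of 2, the third and fourth tokens assigned to expert 0 overflow its capacity and are dropped (the probabilities are made up):

import torch

router_probs = torch.tensor([[[0.9, 0.1],
                              [0.8, 0.2],
                              [0.7, 0.3],
                              [0.6, 0.4],
                              [0.2, 0.8]]])
expert_index = torch.nn.functional.one_hot(router_probs.argmax(-1), num_classes=2)
token_priority = torch.cumsum(expert_index, dim=-2)   # running count of tokens routed to each expert
expert_index = expert_index * (token_priority <= 2)   # expert_capacity = 2
print(expert_index.squeeze(0))
# tensor([[1, 0], [1, 0], [0, 0], [0, 0], [0, 1]])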
240
+ # Copied from transformers.models.switch_transformers.modeling_switch_transformers.SwitchTransformersSparseMLP with SwitchTransformers->GPTSanJapanese
241
+ class GPTSanJapaneseSparseMLP(nn.Module):
242
+ r"""
243
+ Implementation of the Switch Transformers Sparse MLP module.
244
+ """
245
+
246
+ def __init__(self, config: GPTSanJapaneseConfig, expert_class: nn.Module = GPTSanJapaneseDenseActDense):
247
+ super().__init__()
248
+ # Step 1: Get the correct router according to its class
249
+ self.router = GPTSanJapaneseTop1Router(config)
250
+
251
+ # Step 2: Get the experts
252
+ self.experts = nn.ModuleDict()
253
+ for idx in range(config.num_experts):
254
+ self.experts[f"expert_{idx}"] = expert_class(config)
255
+
256
+ def forward(self, hidden_states):
257
+ r"""
258
+ Hold on, this will be slightly tricky to understand. In the correct order, a MoE layer does the following:
259
+
260
+ 1- Gets the `router_mask` from the router. The shape of the mask is `(batch_size, sequence_length, num_expert)`
261
+ and corresponds to the argmax of the `router_probs`. The probabilities are needed in the computation of the
262
+ hidden states : they are broadcasted to the hidden states values (can be interpreted as a scaling factor).
263
+
264
+ 2- Dispatch the tokens to their associated experts. We do a classic for loop over the experts and assign for each
265
+ expert the corresponding hidden states.
266
+
267
+ """
268
+ # Step 1: Get the router_mask from the router as well as the probabilities
269
+ router_mask, router_probs, router_logits = self.router(hidden_states)
270
+ expert_index = torch.argmax(router_mask, dim=-1)
271
+
272
+ # The routers introduced might not always map all the tokens to an expert, which means that some hidden states
273
+ # can be unchanged from one layer to another. That is why the hidden states are cloned before updating only the selected ones.
274
+
275
+ next_states = hidden_states.clone()
276
+ for idx, expert in enumerate(self.experts.values()):
277
+ token_indices = router_mask[:, :, idx].bool()
278
+ next_states[token_indices] = expert(hidden_states[token_indices]).to(next_states.dtype)
279
+
280
+ hidden_states = router_probs * next_states
281
+ return hidden_states, (router_logits, expert_index)
282
+
283
+
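The per-expert dispatch in the loop above reduces to boolean-mask indexing; a standalone sketch with plain weight matrices standing in for the expert MLPs (everything here is illustrative):

import torch

hidden_states = torch.randn(1, 5, 3)                                     # [batch, tokens, hidden]
router_mask = torch.tensor([[[1, 0], [1, 0], [0, 1], [0, 1], [0, 1]]])   # hard top-1 assignment
expert_weights = [torch.randn(3, 3), torch.randn(3, 3)]                  # stand-ins for the expert MLPs

next_states = hidden_states.clone()
for idx, w in enumerate(expert_weights):
    token_indices = router_mask[:, :, idx].bool()                        # tokens routed to this expert
    next_states[token_indices] = hidden_states[token_indices] @ w        # only selected tokens are updated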
284
+ class GPTSanJapaneseLayerSparseFF(nn.Module):
285
+ r"""
286
+ Switch Transformers Feed Forward layer module. This is a wrapper around the Mixture of Experts module.
287
+
288
+ Parameters:
289
+ config : ([`GPTSanJapaneseConfig`]): Model configuration class with all the parameters of the model.
290
+ Initializing with a config file does not load the weights associated with the model, only the
291
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
292
+ """
293
+
294
+ def __init__(self, config: GPTSanJapaneseConfig):
295
+ super().__init__()
296
+ self.mlp = GPTSanJapaneseSparseMLP(config)
297
+ self.soft_bypass_mlp = nn.Linear(config.d_model, config.d_model, bias=False)
298
+ self.norm = nn.LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
299
+
300
+ def forward(self, hidden_states, output_router_logits):
301
+ r"""
302
+ Args:
303
+ hidden_states (`torch.Tensor`) :
304
+ [num_groups, tokens_per_group, hidden_dim] inputs to send to experts.
305
+ output_router_logits (`bool`) :
306
+ Whether to output the experts' router output.
307
+ Returns:
308
+ torch.Tensor[num_groups, tokens_per_group, hidden_dim]
309
+
310
+ """
311
+ forwarded_states, router_tuple = self.mlp(hidden_states)
312
+ forwarded_states += torch.tanh(self.soft_bypass_mlp(hidden_states))
313
+ output = hidden_states + self.norm(forwarded_states)
314
+
315
+ if output_router_logits and router_tuple is not None:
316
+ return output, router_tuple
317
+ else:
318
+ return output
319
+
320
+
321
+ class GPTSanJapaneseLayerDenseFF(nn.Module):
322
+ r"""
323
+ Extra Transformers Feed Forward layer module.
324
+
325
+ Parameters:
326
+ config : ([`GPTSanJapaneseConfig`]): Model configuration class with all the parameters of the model.
327
+ Initializing with a config file does not load the weights associated with the model, only the
328
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
329
+ """
330
+
331
+ def __init__(self, config: GPTSanJapaneseConfig):
332
+ super().__init__()
333
+ # Check if it is a sparse layer, if not then it is a dense layer
334
+ self.mlp = GPTSanJapaneseDenseActDense(config, ext_layer=True)
335
+ self.norm = nn.LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
336
+
337
+ def forward(self, hidden_states):
338
+ r"""
339
+ Args:
340
+ hidden_states (`torch.Tensor`) :
341
+ [num_groups, tokens_per_group, hidden_dim] inputs to send to experts.
342
+ Returns:
343
+ torch.Tensor[num_groups, tokens_per_group, hidden_dim]
344
+
345
+ """
346
+ forwarded_states = self.mlp(hidden_states)
347
+ output = hidden_states + self.norm(forwarded_states)
348
+ return output
349
+
350
+
351
+ # Copied from transformers.models.bart.modeling_bart.BartAttention with Bart->GPTSanJapanese
352
+ class GPTSanJapaneseAttention(nn.Module):
353
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
354
+
355
+ def __init__(
356
+ self,
357
+ embed_dim: int,
358
+ num_heads: int,
359
+ dropout: float = 0.0,
360
+ is_decoder: bool = False,
361
+ bias: bool = True,
362
+ is_causal: bool = False,
363
+ config: Optional[GPTSanJapaneseConfig] = None,
364
+ ):
365
+ super().__init__()
366
+ self.embed_dim = embed_dim
367
+ self.num_heads = num_heads
368
+ self.dropout = dropout
369
+ self.head_dim = embed_dim // num_heads
370
+ self.config = config
371
+
372
+ if (self.head_dim * num_heads) != self.embed_dim:
373
+ raise ValueError(
374
+ f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
375
+ f" and `num_heads`: {num_heads})."
376
+ )
377
+ self.scaling = self.head_dim**-0.5
378
+ self.is_decoder = is_decoder
379
+ self.is_causal = is_causal
380
+
381
+ self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
382
+ self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
383
+ self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
384
+ self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
385
+
386
+ def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
387
+ return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
388
+
389
+ def forward(
390
+ self,
391
+ hidden_states: torch.Tensor,
392
+ key_value_states: Optional[torch.Tensor] = None,
393
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
394
+ attention_mask: Optional[torch.Tensor] = None,
395
+ layer_head_mask: Optional[torch.Tensor] = None,
396
+ output_attentions: bool = False,
397
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
398
+ """Input shape: Batch x Time x Channel"""
399
+
400
+ # if key_value_states are provided this layer is used as a cross-attention layer
401
+ # for the decoder
402
+ is_cross_attention = key_value_states is not None
403
+
404
+ bsz, tgt_len, _ = hidden_states.size()
405
+
406
+ # get query proj
407
+ query_states = self.q_proj(hidden_states) * self.scaling
408
+ # get key, value proj
409
+ # `past_key_value[0].shape[2] == key_value_states.shape[1]`
410
+ # is checking that the `sequence_length` of the `past_key_value` is the same as
411
+ # the provided `key_value_states` to support prefix tuning
412
+ if (
413
+ is_cross_attention
414
+ and past_key_value is not None
415
+ and past_key_value[0].shape[2] == key_value_states.shape[1]
416
+ ):
417
+ # reuse k,v, cross_attentions
418
+ key_states = past_key_value[0]
419
+ value_states = past_key_value[1]
420
+ elif is_cross_attention:
421
+ # cross_attentions
422
+ key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
423
+ value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
424
+ elif past_key_value is not None:
425
+ # reuse k, v, self_attention
426
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
427
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
428
+ key_states = torch.cat([past_key_value[0], key_states], dim=2)
429
+ value_states = torch.cat([past_key_value[1], value_states], dim=2)
430
+ else:
431
+ # self_attention
432
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
433
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
434
+
435
+ if self.is_decoder:
436
+ # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
437
+ # Further calls to cross_attention layer can then reuse all cross-attention
438
+ # key/value_states (first "if" case)
439
+ # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
440
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
441
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
442
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
443
+ past_key_value = (key_states, value_states)
444
+
445
+ proj_shape = (bsz * self.num_heads, -1, self.head_dim)
446
+ query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
447
+ key_states = key_states.reshape(*proj_shape)
448
+ value_states = value_states.reshape(*proj_shape)
449
+
450
+ src_len = key_states.size(1)
451
+ attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
452
+
453
+ if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
454
+ raise ValueError(
455
+ f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
456
+ f" {attn_weights.size()}"
457
+ )
458
+
459
+ if attention_mask is not None:
460
+ if attention_mask.size() != (bsz, 1, tgt_len, src_len):
461
+ raise ValueError(
462
+ f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
463
+ )
464
+ attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
465
+ attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
466
+
467
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1)
468
+
469
+ if layer_head_mask is not None:
470
+ if layer_head_mask.size() != (self.num_heads,):
471
+ raise ValueError(
472
+ f"Head mask for a single layer should be of size {(self.num_heads,)}, but is"
473
+ f" {layer_head_mask.size()}"
474
+ )
475
+ attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
476
+ attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
477
+
478
+ if output_attentions:
479
+ # this operation is a bit awkward, but it's required to
480
+ # make sure that attn_weights keeps its gradient.
481
+ # In order to do so, attn_weights have to be reshaped
482
+ # twice and have to be reused in the following
483
+ attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
484
+ attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
485
+ else:
486
+ attn_weights_reshaped = None
487
+
488
+ attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
489
+
490
+ attn_output = torch.bmm(attn_probs, value_states)
491
+
492
+ if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
493
+ raise ValueError(
494
+ f"`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is"
495
+ f" {attn_output.size()}"
496
+ )
497
+
498
+ attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
499
+ attn_output = attn_output.transpose(1, 2)
500
+
501
+ # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be
502
+ # partitioned across GPUs when using tensor-parallelism.
503
+ attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim)
504
+
505
+ attn_output = self.out_proj(attn_output)
506
+
507
+ return attn_output, attn_weights_reshaped, past_key_value
508
+
509
+
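The attention above is the standard BART-style multi-head attention; a small shape trace of the `bmm`-based core it relies on (the sizes are illustrative):

import torch

bsz, num_heads, tgt_len, head_dim = 2, 4, 6, 8
q = torch.randn(bsz * num_heads, tgt_len, head_dim)    # queries after _shape(...).view(*proj_shape)
k = torch.randn(bsz * num_heads, tgt_len, head_dim)
v = torch.randn(bsz * num_heads, tgt_len, head_dim)

attn_weights = torch.bmm(q, k.transpose(1, 2))          # (bsz*num_heads, tgt_len, src_len)
attn_probs = attn_weights.softmax(dim=-1)
attn_output = torch.bmm(attn_probs, v)                  # (bsz*num_heads, tgt_len, head_dim)
attn_output = attn_output.view(bsz, num_heads, tgt_len, head_dim).transpose(1, 2)
attn_output = attn_output.reshape(bsz, tgt_len, num_heads * head_dim)   # back to (bsz, tgt_len, embed_dim)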
510
+ class GPTSanJapaneseLayerSelfAttention(nn.Module):
511
+ """
512
+ Self Attention and Normalization Unit
513
+ """
514
+
515
+ def __init__(self, config, has_relative_attention_bias=False):
516
+ super().__init__()
517
+ self.self_attn = GPTSanJapaneseAttention(
518
+ embed_dim=config.d_model,
519
+ num_heads=config.num_heads,
520
+ is_decoder=True,
521
+ bias=has_relative_attention_bias,
522
+ )
523
+ self.norm = nn.LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
524
+
525
+ def forward(
526
+ self,
527
+ hidden_states: Optional[Tuple[torch.FloatTensor]],
528
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
529
+ attention_mask: Optional[torch.FloatTensor] = None,
530
+ head_mask: Optional[torch.FloatTensor] = None,
531
+ use_cache: Optional[bool] = False,
532
+ output_attentions: Optional[bool] = False,
533
+ ) -> Tuple[Union[torch.Tensor, Tuple[torch.Tensor]], ...]:
534
+ r"""
535
+ Self-attention and normalize block.
536
+
537
+ Args:
538
+ hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
539
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
540
+ if the model is configured as a decoder.
541
+ past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
542
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up
543
+ decoding. If `past_key_values` are used, the user can optionally input only the last
544
+ `decoder_input_ids` (those that don't have their past key value states given to this model) of shape
545
+ `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
546
+ attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
547
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used
548
+ in the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
549
+
550
+ - 1 for tokens that are **not masked**,
551
+ - 0 for tokens that are **masked**.
552
+
553
+ head_mask (`numpy.ndarray` of shape `({0})`, *optional*):
554
+ Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
555
+
556
+ - 1 indicates the head is **not masked**,
557
+ - 0 indicates the head is **masked**.
558
+
559
+ use_cache (`bool`, *optional*):
560
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
561
+ (see `past_key_values`).
562
+ output_attentions (`bool`, *optional*):
563
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
564
+ returned tensors for more detail.
565
+ Returns:
566
+ Tuple[torch.Tensor[num_groups, tokens_per_group, hidden_dim],...]
567
+ """
568
+ # Self Attention
569
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
570
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
571
+ # add present self-attn cache to positions 1,2 of present_key_value tuple
572
+ atten_out = self.self_attn(
573
+ hidden_states=hidden_states,
574
+ past_key_value=self_attn_past_key_value,
575
+ attention_mask=(1 - attention_mask) * torch.finfo(hidden_states.dtype).min,
576
+ layer_head_mask=head_mask,
577
+ output_attentions=output_attentions,
578
+ )
579
+ if output_attentions:
580
+ attn_weights = (atten_out[1],)
581
+ else:
582
+ attn_weights = ()
583
+
584
+ attention_output = atten_out[0]
585
+
586
+ hidden = hidden_states + self.norm(attention_output)
587
+
588
+ if use_cache:
589
+ outputs = (hidden, atten_out[2]) # hidden, present, (attentions)
590
+ else:
591
+ outputs = (hidden,) # hidden, (attentions)
592
+
593
+ return outputs + attn_weights
594
+
595
+
596
+ class GPTSanJapaneseBlock(nn.Module):
597
+ """
598
+ Self Attention and FFN Unit
599
+ """
600
+
601
+ def __init__(self, config, ext_layer=False):
602
+ super().__init__()
603
+ self.self_attn = GPTSanJapaneseLayerSelfAttention(config)
604
+ self.feed_forward = GPTSanJapaneseLayerDenseFF(config) if ext_layer else GPTSanJapaneseLayerSparseFF(config)
605
+
606
+ def forward(
607
+ self,
608
+ hidden_states: Optional[Tuple[torch.FloatTensor]],
609
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
610
+ attention_mask: Optional[torch.FloatTensor] = None,
611
+ head_mask: Optional[torch.FloatTensor] = None,
612
+ use_cache: Optional[bool] = False,
613
+ output_attentions: Optional[bool] = False,
614
+ output_router_tuple: Optional[bool] = False,
615
+ ) -> Tuple[Union[torch.Tensor, Tuple[torch.Tensor]], ...]:
616
+ r"""
617
+ GPTSAN transformer block.
618
+
619
+ Args:
620
+ hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
621
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
622
+ if the model is configured as a decoder.
623
+ past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
624
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up
625
+ decoding. If `past_key_values` are used, the user can optionally input only the last
626
+ `decoder_input_ids` (those that don't have their past key value states given to this model) of shape
627
+ `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
628
+ attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
629
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used
630
+ in the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
631
+
632
+ - 1 for tokens that are **not masked**,
633
+ - 0 for tokens that are **masked**.
634
+
635
+ head_mask (`numpy.ndarray` of shape `({0})`, *optional*):
636
+ Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
637
+
638
+ - 1 indicates the head is **not masked**,
639
+ - 0 indicates the head is **masked**.
640
+
641
+ use_cache (`bool`, *optional*):
642
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
643
+ (see `past_key_values`).
644
+ output_attentions (`bool`) :
645
+ Whether to output attention probabilities.
646
+ output_router_tuple:
647
+ Whether to output the experts' router logits and expert ids.
648
+ Returns:
649
+ Tuple[torch.Tensor[num_groups, tokens_per_group, hidden_dim],...]
650
+ """
651
+ atten_out = self.self_attn(
652
+ hidden_states=hidden_states,
653
+ past_key_value=past_key_value,
654
+ attention_mask=attention_mask,
655
+ head_mask=head_mask,
656
+ use_cache=use_cache,
657
+ output_attentions=output_attentions,
658
+ )
659
+ attention_output = atten_out[0]
660
+
661
+ if isinstance(self.feed_forward, GPTSanJapaneseLayerSparseFF):
662
+ sparse_out = self.feed_forward(attention_output, output_router_tuple)
663
+ if output_router_tuple:
664
+ hidden, router_tuple = sparse_out
665
+ else:
666
+ hidden = sparse_out
667
+ else:
668
+ hidden = self.feed_forward(attention_output)
669
+
670
+ outputs = (hidden,) + atten_out[1:]
671
+
672
+ if isinstance(self.feed_forward, GPTSanJapaneseLayerSparseFF) and output_router_tuple:
673
+ outputs += (router_tuple,)
674
+
675
+ return outputs
676
+
677
+
678
+ class GPTSanJapanesePreTrainedModel(PreTrainedModel):
679
+ """
680
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
681
+ models.
682
+ """
683
+
684
+ config_class = GPTSanJapaneseConfig
685
+ base_model_prefix = "gptsan_japanese"
686
+ supports_gradient_checkpointing = False
687
+ _no_split_modules = ["GPTSanJapaneseBlock"]
688
+ _skip_keys_device_placement = "past_key_values"
689
+
690
+ @property
691
+ def dummy_inputs(self):
692
+ input_ids = torch.tensor(DUMMY_INPUTS)
693
+ input_mask = torch.tensor(DUMMY_MASK)
694
+ dummy_inputs = {
695
+ "input_ids": input_ids,
696
+ "attention_mask": input_mask,
697
+ }
698
+ return dummy_inputs
699
+
700
+ def _init_weights(self, module):
701
+ """Initialize the weights"""
702
+ factor = self.config.initializer_factor # Used for testing weights initialization
703
+ if isinstance(module, nn.LayerNorm):
704
+ module.weight.data.fill_(factor * 1.0)
705
+ module.bias.data.zero_()
706
+ elif isinstance(module, nn.Linear):
707
+ module.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5))
708
+ if hasattr(module, "bias") and module.bias is not None:
709
+ module.bias.data.zero_()
710
+ elif isinstance(module, nn.Embedding):
711
+ module.weight.data.normal_(mean=0.0, std=factor * 1.0)
712
+ elif isinstance(module, GPTSanJapaneseModel):
713
+ # Mesh TensorFlow embeddings initialization
714
+ # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L1624
715
+ module.embed_tokens.weight.data.normal_(mean=0.0, std=factor * 1.0)
716
+ module.position_embeddings.weight.data.normal_(mean=0.0, std=factor * 1.0)
717
+ if hasattr(module, "extra_position_embeddings") and module.extra_position_embeddings is not None:
718
+ module.extra_position_embeddings.weight.data.normal_(mean=0.0, std=factor * 1.0)
719
+ elif isinstance(module, (GPTSanJapaneseModel, GPTSanJapaneseForConditionalGeneration)):
720
+ # Mesh TensorFlow embeddings initialization
721
+ # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L1624
722
+ module.final_logits_bias.data.normal_(mean=0.0, std=factor * 1.0)
723
+ if hasattr(module, "lm_head") and not self.config.tie_word_embeddings:
724
+ module.lm_head.weight.data.normal_(mean=0.0, std=factor * 1.0)
725
+ elif isinstance(module, GPTSanJapaneseDenseActDense):
726
+ # Mesh TensorFlow FF initialization
727
+ # See https://github.com/tensorflow/mesh/blob/master/mesh_tensorflow/transformer/transformer_layers.py#L56
728
+ # and https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L89
729
+ module.wi.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5))
730
+ if hasattr(module.wi, "bias") and module.wi.bias is not None:
731
+ module.wi.bias.data.zero_()
732
+ module.wo.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_ff) ** -0.5))
733
+ if hasattr(module.wo, "bias") and module.wo.bias is not None:
734
+ module.wo.bias.data.zero_()
735
+ elif isinstance(module, GPTSanJapaneseAttention):
736
+ # Multi-headed attention
737
+ d_model = self.config.d_model
738
+ key_value_proj_dim = self.config.d_model
739
+ n_heads = self.config.num_heads
740
+ module.k_proj.weight.data.normal_(mean=0.0, std=factor * ((d_model * key_value_proj_dim) ** -0.5))
741
+ module.v_proj.weight.data.normal_(mean=0.0, std=factor * ((d_model * key_value_proj_dim) ** -0.5))
742
+ module.q_proj.weight.data.normal_(mean=0.0, std=factor * ((d_model * key_value_proj_dim) ** -0.5))
743
+ module.out_proj.weight.data.normal_(mean=0.0, std=factor * ((n_heads * key_value_proj_dim) ** -0.5))
744
+ elif isinstance(module, GPTSanJapaneseSparseMLP):
745
+ # Mesh TensorFlow attention initialization to avoid scaling before softmax
746
+ # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/attention.py#L136
747
+ d_model = self.config.d_model
748
+ key_value_proj_dim = self.config.d_model
749
+ n_heads = self.config.num_heads
750
+ module.router.classifier.weight.data.normal_(mean=0.0, std=factor * 1)
751
+ for idx in range(self.config.num_experts):
752
+ module.experts[f"expert_{idx}"].wi.weight.data.normal_(mean=0.0, std=factor * (d_model**-0.5))
753
+ module.experts[f"expert_{idx}"].wo.weight.data.normal_(mean=0.0, std=factor * (d_model**-0.5))
754
+
755
+ # Copied from transformers.models.t5.modeling_t5.T5PreTrainedModel._shift_right
756
+ def _shift_right(self, input_ids):
757
+ decoder_start_token_id = self.config.decoder_start_token_id
758
+ pad_token_id = self.config.pad_token_id
759
+
760
+ if decoder_start_token_id is None:
761
+ raise ValueError(
762
+ "self.model.config.decoder_start_token_id has to be defined. In T5 it is usually set to the pad_token_id. "
763
+ "See T5 docs for more information."
764
+ )
765
+
766
+ # shift inputs to the right
767
+ if is_torch_fx_proxy(input_ids):
768
+ # Item assignment is not supported natively for proxies.
769
+ shifted_input_ids = torch.full(input_ids.shape[:-1] + (1,), decoder_start_token_id)
770
+ shifted_input_ids = torch.cat([shifted_input_ids, input_ids[..., :-1]], dim=-1)
771
+ else:
772
+ shifted_input_ids = input_ids.new_zeros(input_ids.shape)
773
+ shifted_input_ids[..., 1:] = input_ids[..., :-1].clone()
774
+ shifted_input_ids[..., 0] = decoder_start_token_id
775
+
776
+ if pad_token_id is None:
777
+ raise ValueError("self.model.config.pad_token_id has to be defined.")
778
+ # replace possible -100 values in labels by `pad_token_id`
779
+ shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)
780
+
781
+ return shifted_input_ids
782
+
783
+
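The `_shift_right` helper above prepends `decoder_start_token_id` and replaces the `-100` label padding with `pad_token_id`. A minimal standalone sketch of that tensor transformation (the ids `0` and `1` below are arbitrary illustration values, not taken from a real GPTSAN config):

```python
import torch

# Toy values standing in for config.decoder_start_token_id and config.pad_token_id.
decoder_start_token_id = 0
pad_token_id = 1

labels = torch.tensor([[5, 6, -100, -100]])  # -100 marks ignored label positions

shifted = labels.new_zeros(labels.shape)
shifted[..., 1:] = labels[..., :-1].clone()           # move every token one step to the right
shifted[..., 0] = decoder_start_token_id              # start token goes first
shifted.masked_fill_(shifted == -100, pad_token_id)   # remaining -100 entries become the pad id

print(shifted)  # tensor([[0, 5, 6, 1]])
```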
784
+ GPTSAN_JAPANESE_START_DOCSTRING = r"""
785
+
786
+ The [GPTSAN-japanese](https://github.com/tanreinama/GPTSAN) model was proposed in General-purpose Switch transformer
787
+ based Japanese language model
788
+
789
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
790
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
791
+ and behavior.
792
+
793
+ Parameters:
794
+ config ([`GPTSanJapaneseConfig`]): Model configuration class with all the parameters of the model.
795
+ Initializing with a config file does not load the weights associated with the model, only the
796
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
797
+ """
798
+
799
+ GPTSAN_JAPANESE_INPUTS_DOCSTRING = r"""
800
+ Args:
801
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
802
+ Indices of input sequence tokens in the vocabulary. GPTSAN-japanese is a model that generates sentence
803
+ continuations or predicts tokens at mask positions. Special tokens required for inputs to the model are
804
+ automatically appended.
805
+ attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
806
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
807
+
808
+ - 1 for tokens that are **not masked**,
809
+ - 0 for tokens that are **masked**.
810
+
811
+ [What are attention masks?](../glossary#attention-mask)
812
+ token_type_ids (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
813
+ An input that masks the Prefix part in the Prefix-LM input. Mask values selected in `[0, 1]`:
814
+
815
+ - 1 for tokens that are **prefix** input,
816
+ - 0 for tokens that are **not-prefix** input.
817
+ spout (`torch.Tensor` of shape `(batch_size, config.d_spout)`):
818
+ This vector is transformed through an 8-layer FFN and can be used instead of `past_key_values`.
819
+ past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
820
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
821
+
822
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
823
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
824
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
825
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
826
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
827
+ use_cache (`bool`, *optional*):
828
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
829
+ `past_key_values`).
830
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
831
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
832
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
833
+ model's internal embedding lookup matrix.
834
+ decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*):
835
+ Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded
836
+ representation. If `past_key_values` is used, optionally only the last `decoder_inputs_embeds` have to be
837
+ input (see `past_key_values`). This is useful if you want more control over how to convert
838
+ `decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix.
839
+ output_attentions (`bool`, *optional*):
840
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
841
+ tensors for more detail.
842
+ output_hidden_states (`bool`, *optional*):
843
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
844
+ more detail.
845
+ return_dict (`bool`, *optional*):
846
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
847
+ router_logits (`tuple(torch.FloatTensor)`, *optional*, returned when `output_router_logits=True` is passed or when `config.add_router_probs=True`):
848
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, sequence_length, num_experts)`.
849
+ Router logits of the decoder model, useful to compute the auxiliary loss for Mixture of Experts models.
850
+ """
851
+
852
+
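The `spout` argument described in the inputs docstring above is a single dense vector of size `config.d_spout` per example; the model projects it through its spout FFN into the same shape as `past_key_values`. A hedged usage sketch, mirroring the docstring examples later in this file (the all-zeros spout is only a placeholder; a meaningful spout would come from whatever conditioning signal the checkpoint was trained with):

```python
import torch
from transformers import AutoModel, AutoTokenizer

model = AutoModel.from_pretrained("Tanrei/GPTSAN-japanese")        # checkpoint used in the docstrings below
tokenizer = AutoTokenizer.from_pretrained("Tanrei/GPTSAN-japanese")

inputs = tokenizer("織田信長は、", return_tensors="pt")
# One spout vector per batch element; zeros serve as a placeholder conditioning signal.
spout = torch.zeros(inputs.input_ids.shape[0], model.config.d_spout)

outputs = model(input_ids=inputs.input_ids, spout=spout)
print(outputs.logits.shape)  # .logits follows the docstring examples in this file
```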
853
+ @add_start_docstrings(
854
+ "The bare GPTSAN-japanese Model transformer outputting raw hidden-states without any specific head on top.",
855
+ GPTSAN_JAPANESE_START_DOCSTRING,
856
+ )
857
+ class GPTSanJapaneseModel(GPTSanJapanesePreTrainedModel):
858
+ def __init__(self, config: GPTSanJapaneseConfig):
859
+ super().__init__(config)
860
+ self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.d_model)
861
+ self.config = copy.deepcopy(config)
862
+ self.embed_tokens = nn.Embedding(config.vocab_size, config.d_model)
863
+ self.last_project = nn.Linear(config.d_model, config.d_model, bias=True)
864
+ self.act = ACT2FN["swish"]
865
+
866
+ self.blocks = torch.nn.ModuleList([])
867
+ for _ in range(config.num_switch_layers):
868
+ self.blocks.append(GPTSanJapaneseBlock(config))
869
+ for _ in range(config.num_ext_layers):
870
+ self.blocks.append(GPTSanJapaneseBlock(config, ext_layer=True))
871
+
872
+ if config.num_ext_layers > 0:
873
+ self.extra_position_embeddings = nn.Embedding(config.max_position_embeddings, config.d_model)
874
+
875
+ if config.d_spout:
876
+ spouts = []
877
+ for _ in range(8):
878
+ spouts.append(nn.Linear(config.d_spout, config.d_spout, bias=False))
879
+ spouts.append(nn.Tanh())
880
+ spouts.append(nn.Linear(config.d_spout, config.num_layers * 2 * config.d_model, bias=False))
881
+ self.spout = nn.Sequential(*spouts)
882
+
883
+ self.post_init()
884
+
885
+ def get_input_embeddings(self):
886
+ return self.embed_tokens
887
+
888
+ def set_input_embeddings(self, new_embeddings):
889
+ self.embed_tokens = new_embeddings
890
+
891
+ @add_start_docstrings_to_model_forward(GPTSAN_JAPANESE_INPUTS_DOCSTRING)
892
+ def forward(
893
+ self,
894
+ input_ids: Optional[torch.LongTensor] = None,
895
+ attention_mask: Optional[torch.FloatTensor] = None,
896
+ token_type_ids: Optional[torch.FloatTensor] = None,
897
+ spout: Optional[torch.FloatTensor] = None,
898
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
899
+ head_mask: Optional[torch.FloatTensor] = None,
900
+ use_cache: Optional[bool] = False,
901
+ inputs_embeds: Optional[torch.FloatTensor] = None,
902
+ decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
903
+ output_attentions: Optional[bool] = None,
904
+ output_hidden_states: Optional[bool] = None,
905
+ return_dict: Optional[bool] = None,
906
+ output_router_logits: Optional[bool] = None,
907
+ num_precontext: Optional[torch.LongTensor] = None,
908
+ ) -> Union[MoEModelOutputWithPastAndCrossAttentions, Tuple[torch.FloatTensor]]:
909
+ r"""
910
+ num_precontext (`torch.LongTensor` of shape `(batch_size,1)`):
911
+ length of the `hybrid` part of the input. Tokens up to this length attend to both the preceding and the
913
+ following context, like BERT; tokens after that attend only to the preceding context, like GPT. See also:
913
+ https://github.com/tanreinama/GPTSAN/blob/main/report/model.md
914
+
915
+ Returns:
916
+ `MoEModelOutputWithPastAndCrossAttentions` or `tuple`. If `return_dict` is `True`, a
917
+ `MoEModelOutputWithPastAndCrossAttentions` is returned instead of a plain tuple.
918
+ """
919
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
920
+ device = self.position_embeddings.weight.device
921
+ if input_ids is None:
922
+ input_ids = torch.zeros([1, 1]).int().to(device)  # dummy input because input_ids is None
923
+ num_pasts_contexts = 0
924
+ num_batch = input_ids.shape[0]
925
+ pasts_or_spout_value = None
926
+ if past_key_values is not None:
927
+ num_pasts_contexts = past_key_values[0][0].shape[2]
928
+ elif self.config.d_spout and spout is not None:
929
+ # `spout` is a special input vector specific to GPTSAN
930
+ # This controls the output by projecting embedded information such as the class of sentences during learning.
931
+ # It should be passed instead of the first past_key_value.
932
+ # See the original GPTSAN repository for details
933
+ num_pasts_contexts += 1
934
+
935
+ # If there is an attention_mask, increase first one for spout
936
+ if self.config.d_spout and spout is not None and attention_mask is not None:
937
+ attention_mask_with_spout = torch.ones(num_batch, attention_mask.shape[1] + 1, device=device)
938
+ attention_mask_with_spout[:, 1:] -= 1 - attention_mask # 1st token should be spout
939
+ attention_mask = attention_mask_with_spout # update attention_mask
940
+
941
+ if num_precontext is not None:
942
+ # `num_precontext` is the number of tokens that refer to each other in prefix-lm
943
+ # created per batch, so dimension of num_precontext should be [batch, 1]
944
+ if not (
945
+ len(num_precontext.shape) == 2 and num_precontext.shape[1] == 1
946
+ ): # num_precontext Should be [batch,1]
947
+ raise ValueError("num_precontext should be [batch, 1] size.")
948
+ num_precontext = torch.reshape(num_precontext, [-1])
949
+ else:
950
+ num_precontext = torch.zeros([num_batch]).int().to(device)
951
+
952
+ num_input_contexts = input_ids.shape[1]
953
+ num_output_contexts = num_input_contexts + num_pasts_contexts
954
+
955
+ hidden_states = self.embed_tokens(input_ids)
956
+
957
+ if past_key_values is not None:
958
+ pasts_or_spout_value = past_key_values
959
+ elif self.config.d_spout and spout is not None:
960
+ # Make vector from `spout` of GPTSAN to the same shape as past_key_values
961
+ pasts_or_spout_value = self.spout(spout) # projecting `spout` vector
962
+ pasts_or_spout_value = torch.reshape(
963
+ pasts_or_spout_value,
964
+ [
965
+ num_batch,
966
+ self.config.num_layers,
967
+ 2,
968
+ self.config.num_heads,
969
+ num_pasts_contexts,
970
+ self.config.d_model // self.config.num_heads,
971
+ ],
972
+ )
973
+ pasts_or_spout_value = torch.split(pasts_or_spout_value, [1] * self.config.num_layers, dim=1)
974
+ # make same shape as past_key_values
975
+ pasts_or_spout_value = tuple(
976
+ tuple([b.squeeze(1) for b in torch.split(a.squeeze(1), [1, 1], dim=1)]) for a in pasts_or_spout_value
977
+ )
978
+ else:
979
+ pasts_or_spout_value = [None] * self.config.num_layers
980
+
981
+ # Token position considering spout and pasts
982
+ token_position = torch.arange(num_input_contexts).to(device) + num_pasts_contexts
983
+
984
+ if attention_mask is None:
985
+ attention_mask = torch.ones(num_batch, num_input_contexts, device=device)
986
+
987
+ # positions for get position_embeddings
988
+ gather_position = (
989
+ (
990
+ torch.zeros((num_batch, self.config.d_model, num_input_contexts)).to(device)
991
+ + token_position.unsqueeze(0)
992
+ )
993
+ .transpose(1, 2)
994
+ .long()
995
+ )
996
+ # When padding with padding_side="left", zeros line up on the left side of attention_mask, so position_embeddings is shifted accordingly
997
+ gather_position -= (1 - attention_mask).argmin(dim=-1).unsqueeze(1).unsqueeze(2)
998
+ gather_position = torch.clip(gather_position, num_pasts_contexts, self.config.max_position_embeddings - 1)
999
+
1000
+ # attention_mask is applied per batch
1001
+ for i in range(num_batch):
1002
+ hidden_states[i] += torch.gather(self.position_embeddings.weight, dim=0, index=gather_position[i])
1003
+
1004
+ # Create a mask that supports a variable prefix input length for the Prefix-LM
1005
+ causal_mask = (
1006
+ torch.tril(torch.ones((num_output_contexts, num_output_contexts), dtype=torch.uint8))
1007
+ .view(1, 1, num_output_contexts, num_output_contexts)
1008
+ .to(device)
1009
+ )
1010
+ prefix_lm_mask = causal_mask[:, :, -num_input_contexts:, :]
1011
+ if token_type_ids is not None:
1012
+ token_type_ids = token_type_ids.unsqueeze(1).unsqueeze(2)
1013
+ prefix_lm_mask = ((prefix_lm_mask + token_type_ids) > 0).float()
1014
+ # Merge prefix_lm_mask and attention_mask
1015
+ extended_attention_mask = prefix_lm_mask * attention_mask.unsqueeze(1).unsqueeze(2)
1016
+
1017
+ # Prepare head mask if needed
1018
+ if head_mask is not None:
1019
+ head_mask = self.get_head_mask(
1020
+ head_mask, self.config.num_switch_layers + self.config.num_ext_layers
1021
+ ) # n_layer x batch x n_heads x N x N
1022
+
1023
+ # outputs
1024
+ present_key_value_states = () if self.config.use_cache or use_cache else None
1025
+ all_hidden_states = () if self.config.output_hidden_states or output_hidden_states else None
1026
+ all_attentions = () if self.config.output_attentions or output_attentions else None
1027
+ all_router_probs = () if self.config.output_router_logits or output_router_logits else None
1028
+
1029
+ for layer, past in enumerate(pasts_or_spout_value):
1030
+ if layer == self.config.num_switch_layers:
1031
+ if self.config.num_ext_layers > 0:
1032
+ # extra_position_embeddings are extra position embeddings that are only created when extending the model with code from the original GPTSAN repository. Not used in the default model.
1033
+ # However, it is created when you create an additional layer and partially train only that location.
1034
+ # Therefore, convert_gptsan_tf_checkpoint_to_pytorch.py is used when converting and loading models created in the original GPTSAN repository.
1035
+ for i in range(num_batch):
1036
+ hidden_states[i] += torch.gather(
1037
+ self.extra_position_embeddings.weight, dim=0, index=gather_position[i]
1038
+ )
1039
+
1040
+ output_router_tuple = (
1041
+ self.config.output_router_logits or output_router_logits
1042
+ ) and layer < self.config.num_switch_layers
1043
+ block_output = self.blocks[layer](
1044
+ hidden_states=hidden_states,
1045
+ past_key_value=past,
1046
+ attention_mask=extended_attention_mask,
1047
+ head_mask=head_mask,
1048
+ use_cache=self.config.use_cache or use_cache,
1049
+ output_attentions=self.config.output_attentions or output_attentions,
1050
+ output_router_tuple=output_router_tuple,
1051
+ )
1052
+
1053
+ outpos = 0
1054
+ hidden_states = block_output[outpos]
1055
+ if self.config.output_hidden_states or output_hidden_states:
1056
+ all_hidden_states += (hidden_states,)
1057
+ if self.config.use_cache or use_cache:
1058
+ outpos += 1
1059
+ present = block_output[outpos]
1060
+ present_key_value_states += (present,)
1061
+ if self.config.output_attentions or output_attentions:
1062
+ outpos += 1
1063
+ attention_probs = block_output[outpos]
1064
+ all_attentions += (attention_probs,)
1065
+ if output_router_tuple:
1066
+ outpos += 1
1067
+ router_tuple = block_output[outpos]
1068
+ all_router_probs.append(router_tuple[0])
1069
+
1070
+ hidden_states = self.last_project(hidden_states)
1071
+ hidden_states = self.act(hidden_states)
1072
+
1073
+ if self.config.output_hidden_states or output_hidden_states:
1074
+ all_hidden_states = all_hidden_states + (hidden_states,)
1075
+
1076
+ if not return_dict:
1077
+ return tuple(
1078
+ v
1079
+ for v in [
1080
+ hidden_states,
1081
+ present_key_value_states,
1082
+ all_hidden_states,
1083
+ all_attentions,
1084
+ all_router_probs,
1085
+ ]
1086
+ if v is not None
1087
+ )
1088
+
1089
+ return MoEModelOutputWithPastAndCrossAttentions(
1090
+ last_hidden_state=hidden_states,
1091
+ past_key_values=present_key_value_states,
1092
+ hidden_states=all_hidden_states,
1093
+ attentions=all_attentions,
1094
+ router_probs=all_router_probs,
1095
+ )
1096
+
1097
+
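The prefix-LM masking built inside `forward` above can be reproduced in isolation: a lower-triangular causal mask is widened wherever `token_type_ids` marks prefix tokens, then combined with the padding `attention_mask`. A small sketch with hypothetical sizes (4 tokens, the first two forming the prefix, no padding):

```python
import torch

seq_len = 4
token_type_ids = torch.tensor([[1, 1, 0, 0]]).float()  # first two tokens are the prefix
attention_mask = torch.ones(1, seq_len)                 # no padding in this toy batch

causal_mask = torch.tril(torch.ones(seq_len, seq_len, dtype=torch.uint8)).view(1, 1, seq_len, seq_len)
prefix_lm_mask = ((causal_mask + token_type_ids.unsqueeze(1).unsqueeze(2)) > 0).float()
extended_attention_mask = prefix_lm_mask * attention_mask.unsqueeze(1).unsqueeze(2)

print(extended_attention_mask[0, 0])
# tensor([[1., 1., 0., 0.],
#         [1., 1., 0., 0.],
#         [1., 1., 1., 0.],
#         [1., 1., 1., 1.]])
```

Prefix tokens see each other in both directions; the remaining tokens fall back to ordinary causal attention.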
1098
+ @add_start_docstrings(
1099
+ "The bare GPTSAN-japanese Model with a language modeling head.",
1100
+ GPTSAN_JAPANESE_START_DOCSTRING,
1101
+ )
1102
+ class GPTSanJapaneseForConditionalGeneration(GPTSanJapanesePreTrainedModel):
1103
+ _tied_weights_keys = ["lm_head.weight"]
1104
+
1105
+ def __init__(self, config: GPTSanJapaneseConfig):
1106
+ super().__init__(config)
1107
+ self.model = GPTSanJapaneseModel(config)
1108
+ self.register_buffer("final_logits_bias", torch.zeros([1, config.vocab_size]))
1109
+ self.lm_head = nn.Linear(config.d_model, config.vocab_size, bias=False)
1110
+ if not self.config.torchscript:
1111
+ self.lm_head.weight = self.model.embed_tokens.weight
1112
+
1113
+ @add_start_docstrings_to_model_forward(GPTSAN_JAPANESE_INPUTS_DOCSTRING)
1114
+ def forward(
1115
+ self,
1116
+ input_ids: Optional[torch.LongTensor] = None,
1117
+ attention_mask: Optional[torch.FloatTensor] = None,
1118
+ token_type_ids: Optional[torch.FloatTensor] = None,
1119
+ spout: Optional[torch.FloatTensor] = None,
1120
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
1121
+ head_mask: Optional[torch.FloatTensor] = None,
1122
+ use_cache: Optional[bool] = False,
1123
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1124
+ decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
1125
+ output_attentions: Optional[bool] = None,
1126
+ output_hidden_states: Optional[bool] = None,
1127
+ return_dict: Optional[bool] = None,
1128
+ output_router_logits: Optional[bool] = None,
1129
+ labels: Optional[torch.LongTensor] = None,
1130
+ ) -> Union[Tuple[torch.FloatTensor], MoECausalLMOutputWithPast]:
1131
+ r"""
1132
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1133
+ Labels for computing the sequence classification loss. Indices should be in `[-100, 0, ...,
1134
+ config.vocab_size - 1]`. All labels set to `-100` are ignored (masked), the loss is only computed for
1135
+ labels in `[0, ..., config.vocab_size]`
1136
+
1137
+ Returns:
1138
+ `MoECausalLMOutputWithPast` or `tuple`. If `return_dict` is `True`, a `MoECausalLMOutputWithPast` is returned instead of a plain tuple.
1139
+
1140
+ Example:
1141
+
1142
+ Text Generation with regular LM Model
1143
+ ```python
1144
+ >>> from transformers import AutoModel, AutoTokenizer, trainer_utils
1145
+
1146
+ >>> device = "cuda"
1147
+ >>> model = AutoModel.from_pretrained("Tanrei/GPTSAN-japanese").to(device)
1148
+ >>> tokenizer = AutoTokenizer.from_pretrained("Tanrei/GPTSAN-japanese")
1149
+ >>> x_token = tokenizer("織田信長は、", return_tensors="pt")
1150
+ >>> trainer_utils.set_seed(30)
1151
+ >>> input_ids = x_token.input_ids.to(device)
1152
+ >>> gen_token = model.generate(input_ids, max_new_tokens=50)
1153
+ >>> tokenizer.decode(gen_token[0])
1154
+ "織田信長は、政治・軍事の中枢まで掌握した政治家であり、日本史上類を見ない驚異的な軍事侵攻を続け..."
1155
+ ```
1156
+
1157
+ Text Generation with Prefix-LM Model
1158
+ ```python
1159
+ >>> from transformers import AutoModel, AutoTokenizer, trainer_utils
1160
+
1161
+ >>> device = "cuda"
1162
+ >>> model = AutoModel.from_pretrained("Tanrei/GPTSAN-japanese").to(device)
1163
+ >>> tokenizer = AutoTokenizer.from_pretrained("Tanrei/GPTSAN-japanese")
1164
+ >>> x_token = tokenizer("", prefix_text="織田信長は、", return_tensors="pt")
1165
+ >>> trainer_utils.set_seed(30)
1166
+ >>> input_ids = x_token.input_ids.to(device)
1167
+ >>> token_type_ids = x_token.token_type_ids.to(device)
1168
+ >>> gen_token = model.generate(input_ids, token_type_ids=token_type_ids, max_new_tokens=50)
1169
+ >>> tokenizer.decode(gen_token[0])
1170
+ "織田信長は、政治・外交で数々の戦果を上げるが、1568年からは、いわゆる本能寺の変で細川晴元に暗殺される..."
1171
+ ```
1172
+
1173
+ Simultaneous Text Generation and Masked Language Modeling
1174
+ ```python
1175
+ >>> from transformers import AutoModel, AutoTokenizer, trainer_utils
1176
+
1177
+ >>> device = "cuda"
1178
+ >>> model = AutoModel.from_pretrained("Tanrei/GPTSAN-japanese").to(device)
1179
+ >>> tokenizer = AutoTokenizer.from_pretrained("Tanrei/GPTSAN-japanese")
1180
+ >>> masked_sentence = "武田信玄は、<|inputmask|>時代ファンならぜひ押さえ<|inputmask|>きたい名将の一人。"
1181
+ >>> x_token = tokenizer("", prefix_text=masked_sentence, return_tensors="pt")
1182
+ >>> trainer_utils.set_seed(30)
1183
+ >>> input_ids = x_token.input_ids.to(device)
1184
+ >>> token_type_ids = x_token.token_type_ids.to(device)
1185
+ >>> out_lm_token = model.generate(input_ids, token_type_ids=token_type_ids, max_new_tokens=50)
1186
+ >>> out_mlm_token = model(input_ids, token_type_ids=token_type_ids).logits.argmax(axis=-1)
1187
+ >>> tokenizer.decode(out_mlm_token[0])
1188
+ "武田信玄は、戦国時代ファンならぜひ押さえておきたい名将の一人。"
1189
+
1190
+ >>> tokenizer.decode(out_lm_token[0][input_ids.shape[1] :])
1191
+ "武田氏の三代に渡った武田家のひとり\n甲斐市に住む、日本史上最大の戦国大名。..."
1192
+ ```"""
1193
+ SEG_TOKEN = self.config.separator_token_id
1194
+ use_cache = use_cache or self.config.use_cache
1195
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1196
+ model_return_dict = True
1197
+ num_precontext = None
1198
+ if input_ids is not None:
1199
+ num_batch = input_ids.shape[0]
1200
+ num_precontext = torch.zeros([num_batch]).int().to(input_ids.device)
1201
+ where_separators = torch.where(input_ids == SEG_TOKEN)
1202
+ num_precontext[where_separators[0]] += where_separators[1]
1203
+ num_precontext = num_precontext.unsqueeze(1)
1204
+
1205
+ outputs = self.model(
1206
+ input_ids,
1207
+ attention_mask,
1208
+ token_type_ids,
1209
+ spout,
1210
+ past_key_values,
1211
+ head_mask,
1212
+ use_cache,
1213
+ inputs_embeds,
1214
+ decoder_inputs_embeds,
1215
+ output_attentions,
1216
+ output_hidden_states,
1217
+ model_return_dict,
1218
+ output_router_logits,
1219
+ num_precontext,
1220
+ )
1221
+
1222
+ lm_logits = self.lm_head(outputs[0])
1223
+ if lm_logits.shape[-1] == self.final_logits_bias.shape[-1]:
1224
+ lm_logits = lm_logits + self.final_logits_bias
1225
+
1226
+ loss = None
1227
+ z_loss = None
1228
+ router_probs = None
1229
+ aux_loss = None
1230
+ if labels is not None:
1231
+ # move labels to correct device to enable model parallelism
1232
+ labels = labels.to(lm_logits.device)
1233
+
1234
+ loss_fct = nn.CrossEntropyLoss(ignore_index=-100)
1235
+
1236
+ if output_router_logits:
1237
+ # Compute the router loss (z_loss + auxiliary loss) for each router in the encoder and decoder
1238
+ router_logits, expert_indexes = self._unpack_router_logits(outputs.router_probs)
1239
+ z_loss = router_z_loss_func(router_logits)
1240
+ router_probs = nn.Softmax(dim=-1)(router_logits)
1241
+ aux_loss = load_balancing_loss_func(router_probs, expert_indexes)
1242
+
1243
+ loss = loss_fct(lm_logits.view(-1, lm_logits.size(-1)), labels.view(-1))
1244
+
1245
+ if not return_dict:
1246
+ return tuple(
1247
+ v
1248
+ for v in [
1249
+ loss,
1250
+ lm_logits,
1251
+ outputs.past_key_values,
1252
+ outputs.hidden_states,
1253
+ outputs.router_probs,
1254
+ z_loss,
1255
+ aux_loss,
1256
+ ]
1257
+ if v is not None
1258
+ )
1259
+
1260
+ return MoECausalLMOutputWithPast(
1261
+ loss=loss,
1262
+ logits=lm_logits,
1263
+ past_key_values=outputs.past_key_values,
1264
+ hidden_states=outputs.hidden_states,
1265
+ attentions=outputs.attentions,
1266
+ router_logits=outputs.router_probs,
1267
+ z_loss=z_loss,
1268
+ aux_loss=aux_loss,
1269
+ )
1270
+
1271
+ def prepare_inputs_for_generation(
1272
+ self,
1273
+ input_ids: torch.LongTensor,
1274
+ attention_mask: torch.FloatTensor,
1275
+ token_type_ids: Optional[torch.FloatTensor] = None,
1276
+ spout: Optional[Union[List, torch.FloatTensor]] = None,
1277
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
1278
+ **kwargs,
1279
+ ):
1280
+ if isinstance(spout, list):
1281
+ spout = torch.tensor(spout).float()
1282
+ if input_ids is not None:
1283
+ spout = spout.to(input_ids.device)
1284
+ if past_key_values is not None:
1285
+ return {
1286
+ "input_ids": input_ids[:, -1:] if input_ids is not None else None,
1287
+ "attention_mask": attention_mask,
1288
+ "token_type_ids": token_type_ids[:, -1:] if token_type_ids is not None else None,
1289
+ "spout": spout,
1290
+ "past_key_values": past_key_values,
1291
+ }
1292
+ return {
1293
+ "input_ids": input_ids,
1294
+ "attention_mask": attention_mask,
1295
+ "token_type_ids": token_type_ids,
1296
+ "spout": spout,
1297
+ "past_key_values": None,
1298
+ }
1299
+
1300
+ # Copied from transformers.models.switch_transformers.modeling_switch_transformers.SwitchTransformersForConditionalGeneration.prepare_decoder_input_ids_from_labels with SwitchTransformers->GPTSanJapanese
1301
+ def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor):
1302
+ return self._shift_right(labels)
1303
+
1304
+ # Copied from transformers.models.mbart.modeling_mbart.MBartForConditionalGeneration.resize_token_embeddings with MBart->GPTSanJapanese
1305
+ def resize_token_embeddings(self, new_num_tokens: int, pad_to_multiple_of: Optional[int] = None) -> nn.Embedding:
1306
+ new_embeddings = super().resize_token_embeddings(new_num_tokens, pad_to_multiple_of)
1307
+ self._resize_final_logits_bias(new_embeddings.weight.shape[0])
1308
+ return new_embeddings
1309
+
1310
+ # Copied from transformers.models.mbart.modeling_mbart.MBartForConditionalGeneration._resize_final_logits_bias with MBart->GPTSanJapanese
1311
+ def _resize_final_logits_bias(self, new_num_tokens: int) -> None:
1312
+ old_num_tokens = self.final_logits_bias.shape[-1]
1313
+ if new_num_tokens <= old_num_tokens:
1314
+ new_bias = self.final_logits_bias[:, :new_num_tokens]
1315
+ else:
1316
+ extra_bias = torch.zeros((1, new_num_tokens - old_num_tokens), device=self.final_logits_bias.device)
1317
+ new_bias = torch.cat([self.final_logits_bias, extra_bias], dim=1)
1318
+ self.register_buffer("final_logits_bias", new_bias)
1319
+
1320
+ def get_input_embeddings(self):
1321
+ return self.model.get_input_embeddings()
1322
+
1323
+ def set_input_embeddings(self, new_embeddings):
1324
+ self.model.set_input_embeddings(new_embeddings)
1325
+
1326
+ # Copied from transformers.models.switch_transformers.modeling_switch_transformers.SwitchTransformersForConditionalGeneration.set_output_embeddings with SwitchTransformers->GPTSanJapanese
1327
+ def set_output_embeddings(self, new_embeddings):
1328
+ self.lm_head = new_embeddings
1329
+
1330
+ # Copied from transformers.models.switch_transformers.modeling_switch_transformers.SwitchTransformersForConditionalGeneration.get_output_embeddings with SwitchTransformers->GPTSanJapanese
1331
+ def get_output_embeddings(self):
1332
+ return self.lm_head
1333
+
1334
+ # Copied from transformers.models.switch_transformers.modeling_switch_transformers.SwitchTransformersForConditionalGeneration._unpack_router_logits with SwitchTransformers->GPTSanJapanese
1335
+ def _unpack_router_logits(self, router_outputs):
1336
+ total_router_logits = []
1337
+ total_expert_indexes = []
1338
+ for router_output in router_outputs:
1339
+ if len(router_output[0].shape) > 1:
1340
+ router_logits, expert_indexes = router_output
1341
+ total_router_logits.append(router_logits)
1342
+ total_expert_indexes.append(expert_indexes)
1343
+ return torch.cat(total_router_logits, dim=1), torch.cat(total_expert_indexes, dim=1)
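In `GPTSanJapaneseForConditionalGeneration.forward` above, the per-row prefix length is recovered from the position of the separator token before it is handed to the base model. A toy reconstruction of that bookkeeping (the id 35998 for the separator follows the tokenizer examples later in this diff; the other ids are arbitrary):

```python
import torch

SEG_TOKEN = 35998  # separator id, as in the tokenizer examples
input_ids = torch.tensor([
    [35993, 35998, 10, 11, 12],  # separator right after the start token -> prefix length 1
    [35993, 20, 21, 35998, 22],  # separator at position 3 -> prefix length 3
])

num_batch = input_ids.shape[0]
num_precontext = torch.zeros([num_batch]).int()
where_separators = torch.where(input_ids == SEG_TOKEN)
num_precontext[where_separators[0]] += where_separators[1]  # column index of the separator
num_precontext = num_precontext.unsqueeze(1)                # shape [batch, 1], as the model expects

print(num_precontext)  # tensor([[1], [3]], dtype=torch.int32)
```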
venv/lib/python3.10/site-packages/transformers/models/gptsan_japanese/tokenization_gptsan_japanese.py ADDED
@@ -0,0 +1,526 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Tokenization classes for GPTSANJapanese."""
16
+ import collections
17
+ import json
18
+ import os
19
+ import re
20
+ from typing import List, Optional, Tuple, Union
21
+
22
+ import numpy as np
23
+
24
+ from ...tokenization_utils import PreTrainedTokenizer
25
+ from ...tokenization_utils_base import (
26
+ BatchEncoding,
27
+ PreTokenizedInput,
28
+ PreTokenizedInputPair,
29
+ TextInput,
30
+ TextInputPair,
31
+ TruncationStrategy,
32
+ )
33
+ from ...utils import PaddingStrategy, logging
34
+
35
+
36
+ logger = logging.get_logger(__name__)
37
+
38
+ VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "emoji_file": "emoji.json"}
39
+
40
+
41
+ def load_vocab_and_emoji(vocab_file, emoji_file):
42
+ """Loads a vocabulary file and emoji file into a dictionary."""
43
+ with open(emoji_file, "r", encoding="utf-8") as f:
44
+ emoji = json.loads(f.read())
45
+
46
+ vocab = collections.OrderedDict()
47
+ raw_vocab = collections.OrderedDict()
48
+ ids_to_tokens = collections.OrderedDict()
49
+ with open(vocab_file, "r", encoding="utf-8") as f:
50
+ token = f.readlines()
51
+ token = [[t.rstrip("\n")] if (t == ",\n" or "," not in t) else t.rstrip("\n").split(",") for t in token]
52
+ for idx, b in enumerate(token):
53
+ ids_to_tokens[idx] = b
54
+ raw_vocab[",".join(b)] = idx
55
+ for wd in b:
56
+ vocab[wd] = idx
57
+
58
+ return vocab, raw_vocab, ids_to_tokens, emoji
59
+
60
+
61
+ class GPTSanJapaneseTokenizer(PreTrainedTokenizer):
62
+ """
63
+ This tokenizer is based on GPTNeoXJapaneseTokenizer and has the following modifications
64
+ - Decoding byte0~byte255 tokens correctly
65
+ - Added bagofword token handling
66
+ - Return token_type_ids for Prefix-LM model
67
+ The bagofword token represents a repetition of the previous token and is converted to 3 consecutive tokens when
68
+ decoding. In addition, the original Japanese special Sub-Word-Encoding has been released in this repository
69
+ (https://github.com/tanreinama/Japanese-BPEEncoder_V2). The token_type_ids is a mask indicating the prefix input
70
+ position of the Prefix-LM model. To specify a prefix position, specify a prefix input for prefix_text, or specify a
71
+ sentence of the prefix part and the part after it as a text pair of batch input.
72
+
73
+ Example:
74
+
75
+ ```python
76
+ >>> from transformers import GPTSanJapaneseTokenizer
77
+
78
+ >>> tokenizer = GPTSanJapaneseTokenizer.from_pretrained("Tanrei/GPTSAN-japanese")
79
+ >>> # You can confirm both 慶応 and 慶應 are encoded to 17750
80
+ >>> tokenizer("吾輩は猫である🐯。実は慶応(慶應)大学出身")["input_ids"]
81
+ [35993, 35998, 34347, 31459, 30647, 31448, 25, 30659, 35729, 35676, 32417, 30647, 17750, 35589, 17750, 35590, 321, 1281]
82
+
83
+ >>> # Both 慶応 and 慶應 are decoded to 慶応
84
+ >>> tokenizer.decode(tokenizer("吾輩は猫である🐯。実は慶応(慶應)大学出身")["input_ids"])
85
+ '吾輩は猫である🐯。実は慶応(慶応)大学出身'
86
+ ```
87
+
88
+ Example for Prefix-LM:
89
+
90
+ ```python
91
+ >>> from transformers import GPTSanJapaneseTokenizer
92
+
93
+ >>> tokenizer = GPTSanJapaneseTokenizer.from_pretrained("Tanrei/GPTSAN-japanese")
94
+ >>> tokenizer("実は慶応(慶應)大学出身", prefix_text="吾輩は猫である🐯。")["input_ids"]
95
+ [35993, 34347, 31459, 30647, 31448, 25, 30659, 35729, 35676, 35998, 32417, 30647, 17750, 35589, 17750, 35590, 321, 1281]
96
+
97
+ >>> # Mask for Prefix-LM inputs
98
+ >>> tokenizer("実は慶応(慶應)大学出身", prefix_text="吾輩は猫である🐯。")["token_type_ids"]
99
+ [1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0]
100
+ ```
101
+
102
+ Example for batch encode:
103
+
104
+ ```python
105
+ >>> from transformers import GPTSanJapaneseTokenizer
106
+
107
+ >>> tokenizer = GPTSanJapaneseTokenizer.from_pretrained("Tanrei/GPTSAN-japanese")
108
+ >>> tokenizer([["武田信玄", "は、"], ["織田信長", "の配下の、"]], padding=True)["input_ids"]
109
+ [[35993, 35998, 8640, 25948, 35993, 35998, 30647, 35675, 35999, 35999], [35993, 35998, 10382, 9868, 35993, 35998, 30646, 9459, 30646, 35675]]
110
+
111
+ >>> # Mask for Prefix-LM inputs
112
+ >>> tokenizer([["武田信玄", "は、"], ["織田信長", "の配下の、"]], padding=True)["token_type_ids"]
113
+ [[1, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 0, 0, 0, 0, 0, 0, 0, 0, 0]]
114
+
115
+ >>> # Mask for padding
116
+ >>> tokenizer([["武田信玄", "は、"], ["織田信長", "の配下の、"]], padding=True)["attention_mask"]
117
+ [[1, 1, 1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
118
+ ```
119
+
120
+ Args:
121
+ vocab_file (`str`):
122
+ File containing the vocabulary.
123
+ emoji_file (`str`):
124
+ File containing the emoji.
125
+ unk_token (`str`, *optional*, defaults to `"<|nottoken|>"`):
126
+ The token used for an unknown character.
127
+ pad_token (`str`, *optional*, defaults to `"<|separator|>"`):
128
+ The token used for padding
129
+ bos_token (`str`, *optional*, defaults to `"<|startoftext|>"`):
130
+ The beginning of sequence token.
131
+ eos_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
132
+ The end of sequence token.
133
+ sep_token (`str`, *optional*, defaults to `"<|segmenter|>"`):
134
+ A special token separating the prefix part from the general input part.
135
+ do_clean_text (`bool`, *optional*, defaults to `False`):
136
+ Whether or not to clean text for URL, EMAIL, TEL, Japanese DATE and Japanese PRICE.
137
+ """
138
+
139
+ vocab_files_names = VOCAB_FILES_NAMES
140
+ model_input_names = ["input_ids", "attention_mask", "token_type_ids"]
141
+
142
+ def __init__(
143
+ self,
144
+ vocab_file,
145
+ emoji_file,
146
+ unk_token="<|nottoken|>",
147
+ pad_token="<|separator|>",
148
+ bos_token="<|startoftext|>",
149
+ eos_token="<|endoftext|>",
150
+ sep_token="<|segmenter|>",
151
+ do_clean_text=False,
152
+ **kwargs,
153
+ ):
154
+ if not os.path.isfile(vocab_file):
155
+ raise ValueError(
156
+ f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
157
+ " model use `tokenizer = GPTSanJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
158
+ )
159
+ if not os.path.isfile(emoji_file):
160
+ raise ValueError(
161
+ f"Can't find a emoji file at path '{emoji_file}'. To load the emoji information from a Google"
162
+ " pretrained model use `tokenizer = GPTSanJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
163
+ )
164
+ self.do_clean_text = do_clean_text
165
+ self.vocab, self.raw_vocab, self.ids_to_tokens, self.emoji = load_vocab_and_emoji(vocab_file, emoji_file)
166
+ self.subword_tokenizer = SubWordJapaneseTokenizer(
167
+ vocab=self.vocab, ids_to_tokens=self.ids_to_tokens, emoji=self.emoji
168
+ )
169
+
170
+ super().__init__(
171
+ unk_token=unk_token,
172
+ pad_token=pad_token,
173
+ bos_token=bos_token,
174
+ eos_token=eos_token,
175
+ sep_token=sep_token,
176
+ do_clean_text=do_clean_text,
177
+ **kwargs,
178
+ )
179
+
180
+ @property
181
+ # Copied from tokenization_gpt_neox_japanese.GPTNeoXJapaneseTokenizer.vocab_size
182
+ def vocab_size(self):
183
+ # self.vocab contains support for character fluctuation unique to Japanese, and has a large number of vocab
184
+ return len(self.raw_vocab)
185
+
186
+ # Copied from tokenization_gpt_neox_japanese.GPTNeoXJapaneseTokenizer.get_vocab
187
+ def get_vocab(self):
188
+ return dict(self.raw_vocab, **self.added_tokens_encoder)
189
+
190
+ # Copied from tokenization_gpt_neox_japanese.GPTNeoXJapaneseTokenizer._tokenize
191
+ def _tokenize(self, text):
192
+ return self.subword_tokenizer.tokenize(text, clean=self.do_clean_text)
193
+
194
+ # Copied from tokenization_gpt_neox_japanese.GPTNeoXJapaneseTokenizer._convert_token_to_id
195
+ def _convert_token_to_id(self, token):
196
+ """Converts a token (str) in an id using the vocab."""
197
+ return self.vocab.get(token, self.vocab.get(self.unk_token))
198
+
199
+ # Copied from tokenization_gpt_neox_japanese.GPTNeoXJapaneseTokenizer._convert_id_to_token
200
+ def _convert_id_to_token(self, index):
201
+ """Converts an index (integer) in a token (str) using the vocab."""
202
+ return self.subword_tokenizer.convert_id_to_token(index)
203
+
204
+ def convert_tokens_to_string(self, tokens):
205
+ """Converts a sequence of tokens (string) in a single string."""
206
+ words = []
207
+ byte_tokens = []
208
+ for word in tokens:
209
+ if word[:6] == "<|byte" and word[-2:] == "|>":
210
+ byte_tokens.append(int(word[6:-2]))
211
+ else:
212
+ if len(byte_tokens) > 0:
213
+ words.append(bytearray(byte_tokens).decode("utf-8", errors="replace"))
214
+ byte_tokens = []
215
+ if word[:7] == "<|emoji" and word[-2:] == "|>":
216
+ words.append(self.emoji["emoji_inv"][word])
217
+ elif word == "<SP>":
218
+ words.append(" ")
219
+ elif word == "<BR>":
220
+ words.append("\n")
221
+ elif word == "<TAB>":
222
+ words.append("\t")
223
+ elif word == "<BLOCK>":
224
+ words.append("▀")
225
+ elif word == "<KIGOU>":
226
+ words.append("ǀ")
227
+ elif word == "<U2000U2BFF>":
228
+ words.append("‖")
229
+ elif word == "<|bagoftoken|>":
230
+ if len(words) > 0:
231
+ words.append(words[-1])
232
+ words.append(words[-1])
233
+ words.append(words[-1])
234
+ elif word.startswith("<|") and word.endswith("|>"):
235
+ words.append("")
236
+ else:
237
+ words.append(word)
238
+ if len(byte_tokens) > 0:
239
+ words.append(bytearray(byte_tokens).decode("utf-8", errors="replace"))
240
+ text = "".join(words)
241
+ return text
242
+
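The byte-fallback handling in `convert_tokens_to_string` above collects consecutive `<|byteN|>` tokens and decodes them as UTF-8. A small standalone sketch of just that branch (the three byte tokens are the UTF-8 encoding of "あ", i.e. 0xE3 0x81 0x82):

```python
tokens = ["<|byte227|>", "<|byte129|>", "<|byte130|>", "<SP>"]

words, byte_tokens = [], []
for word in tokens:
    if word[:6] == "<|byte" and word[-2:] == "|>":
        byte_tokens.append(int(word[6:-2]))        # accumulate raw byte values
    else:
        if byte_tokens:
            words.append(bytearray(byte_tokens).decode("utf-8", errors="replace"))
            byte_tokens = []
        if word == "<SP>":
            words.append(" ")

print("".join(words))  # "あ "
```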
243
+ @property
244
+ def default_chat_template(self):
245
+ """
246
+ A simple chat template that adds standard BOS, SEP and EOS tokens between messages while discarding role
247
+ information.
248
+ """
249
+ logger.warning_once(
250
+ "\nNo chat template is defined for this tokenizer - using the default template "
251
+ f"for the {self.__class__.__name__} class. If the default is not appropriate for "
252
+ "your model, please set `tokenizer.chat_template` to an appropriate template. "
253
+ "See https://huggingface.co/docs/transformers/main/chat_templating for more information.\n"
254
+ )
255
+ return (
256
+ "{% for message in messages %}"
257
+ "{% if not loop.first %}{{ bos_token}}{% endif %}"
258
+ "{{ sep_token }}{{ message.content }} {{ eos_token }}"
259
+ "{% endfor %}"
260
+ )
261
+
262
+ # Copied from tokenization_gpt_neox_japanese.GPTNeoXJapaneseTokenizer.save_vocabulary
263
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
264
+ index = 0
265
+ if os.path.isdir(save_directory):
266
+ vocab_file = os.path.join(
267
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
268
+ )
269
+ emoji_file = os.path.join(
270
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"]
271
+ )
272
+ else:
273
+ vocab_file = (
274
+ (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
275
+ )
276
+ emoji_file = (
277
+ (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
278
+ )
279
+ with open(vocab_file, "w", encoding="utf-8") as writer:
280
+ for token_index, token in self.ids_to_tokens.items():
281
+ if index != token_index:
282
+ logger.warning(
283
+ f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
284
+ " Please check that the vocabulary is not corrupted!"
285
+ )
286
+ index = token_index
287
+ writer.write(",".join(token) + "\n")
288
+ index += 1
289
+ with open(emoji_file, "w", encoding="utf-8") as writer:
290
+ json.dump(self.emoji, writer)
291
+ return vocab_file, emoji_file
292
+
293
+ def create_token_type_ids_from_sequences(
294
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
295
+ ) -> List[int]:
296
+ # docstyle-ignore
297
+ """
298
+ The tokenizer returns token_type_ids as separators between the Prefix part and the rest.
299
+ token_type_ids is 1 for the Prefix part and 0 for the rest of the token.
300
+
301
+ Example:
302
+ ```python
303
+ >>> from transformers import GPTSanJapaneseTokenizer
304
+
305
+ >>> tokenizer = GPTSanJapaneseTokenizer.from_pretrained("Tanrei/GPTSAN-japanese")
306
+ >>> x_token = tokenizer("アイウエ")
307
+ >>> # input_ids: | SOT | SEG | ア | イ | ウ | エ |
308
+ >>> # token_type_ids: | 1 | 0 | 0 | 0 | 0 | 0 |
309
+
310
+ >>> x_token = tokenizer("", prefix_text="アイウエ")
311
+ >>> # input_ids: | SOT | ア | イ | ウ | エ | SEG |
312
+ >>> # token_type_ids: | 1 | 1 | 1 | 1 | 1 | 0 |
313
+
314
+ >>> x_token = tokenizer("ウエ", prefix_text="アイ")
315
+ >>> # input_ids: | SOT | ア | イ | SEG | ウ | エ |
316
+ >>> # token_type_ids: | 1 | 1 | 1 | 0 | 0 | 0 |
317
+ ```"""
318
+ prefix_len = 0
319
+ if self.sep_token in self.vocab:
320
+ segid = self.vocab[self.sep_token]
321
+ if segid in token_ids_0:
322
+ prefix_len = token_ids_0.index(segid)
323
+ if token_ids_1 is None:
324
+ total_len = len(token_ids_0)
325
+ else:
326
+ total_len = len(token_ids_0 + token_ids_1)
327
+ return prefix_len * [1] + (total_len - prefix_len) * [0]
328
+
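The rule implemented by `create_token_type_ids_from_sequences` above is simply: everything before the `<|segmenter|>` token is prefix (`1`), and everything from the separator onward is `0`. A toy reconstruction with made-up ids (35998 stands in for the separator id, as in the examples earlier in this file):

```python
SEG_ID = 35998  # separator id used for illustration
token_ids_0 = [35993, 100, 101, 35998, 102, 103]  # SOT, two prefix tokens, SEG, two body tokens

prefix_len = token_ids_0.index(SEG_ID) if SEG_ID in token_ids_0 else 0
token_type_ids = prefix_len * [1] + (len(token_ids_0) - prefix_len) * [0]

print(token_type_ids)  # [1, 1, 1, 0, 0, 0]
```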
329
+ def prepare_for_tokenization(self, text, prefix_text=None, add_sep_token=None, **kwargs):
330
+ # GPTSAN inserts extra SEP tokens in Prefix-LM in addition to SOT for text generation.
331
+ # SOT at the beginning of the text, and SEP at the separator between the Prefix part and the rest.
332
+ if add_sep_token is None:
333
+ add_sep_token = self.sep_token not in text  # only insert SEP if the un-prefix position is not already marked explicitly
334
+ prepared = self.bos_token if self.bos_token in self.vocab else ""
335
+ prepared += prefix_text if prefix_text is not None else ""
336
+ if add_sep_token:
337
+ prepared += self.sep_token if self.sep_token in self.vocab else ""
338
+ prepared += text
339
+ return (prepared, kwargs)
340
+
341
+ def _batch_encode_plus(
342
+ self,
343
+ batch_text_or_text_pairs: Union[
344
+ List[TextInput], List[TextInputPair], List[PreTokenizedInput], List[PreTokenizedInputPair]
345
+ ],
346
+ add_special_tokens: bool = True,
347
+ padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
348
+ truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
349
+ max_length: Optional[int] = None,
350
+ stride: int = 0,
351
+ is_split_into_words: bool = False,
352
+ pad_to_multiple_of: Optional[int] = None,
353
+ return_tensors: Optional[str] = None,
354
+ return_token_type_ids: Optional[bool] = None,
355
+ return_attention_mask: Optional[bool] = None,
356
+ return_overflowing_tokens: bool = False,
357
+ return_special_tokens_mask: bool = False,
358
+ return_offsets_mapping: bool = False,
359
+ return_length: bool = False,
360
+ verbose: bool = True,
361
+ ) -> BatchEncoding:
362
+ # This tokenizer converts input text pairs into Prefix input and subsequent input
363
+ if isinstance(batch_text_or_text_pairs[0], tuple) or isinstance(tuple(batch_text_or_text_pairs[0]), list):
364
+ # As a single text with an explicit un-prefix position
365
+ batch_prefix_texts = []
366
+ for pref, txt in batch_text_or_text_pairs:
367
+ batch_prefix_texts.append(pref + self.sep_token + txt)
368
+ batch_text_or_text_pairs = batch_prefix_texts
369
+
370
+ return super()._batch_encode_plus(
371
+ batch_text_or_text_pairs,
372
+ add_special_tokens,
373
+ padding_strategy,
374
+ truncation_strategy,
375
+ max_length,
376
+ stride,
377
+ is_split_into_words,
378
+ pad_to_multiple_of,
379
+ return_tensors,
380
+ return_token_type_ids,
381
+ return_attention_mask,
382
+ return_overflowing_tokens,
383
+ return_special_tokens_mask,
384
+ return_offsets_mapping,
385
+ return_length,
386
+ verbose,
387
+ )
388
+
389
+
390
+ class SubWordJapaneseTokenizer(object):
391
+ """
392
+ This tokenizer is based on GPTNeoXJapaneseTokenizer and has the following modifications
393
+ - Decoding byte0~byte255 tokens correctly
394
+ - Added bagofword token handling
395
+
396
+ https://github.com/tanreinama/Japanese-BPEEncoder_V2. This tokenizer class is under the MIT License according to the
397
+ original repository.
398
+
399
+ MIT License
400
+
401
+ Copyright (c) 2020 tanreinama
402
+
403
+ Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
404
+ documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
405
+ rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
406
+ permit persons to whom the Software is furnished to do so, subject to the following conditions:
407
+
408
+ The above copyright notice and this permission notice shall be included in all copies or substantial portions of
409
+ the Software.
410
+
411
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
412
+ THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
413
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
414
+ TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
415
+ SOFTWARE.
416
+ """
417
+
418
+ # Copied from tokenization_gpt_neox_japanese.SubWordJapaneseTokenizer.__init__
419
+ def __init__(self, vocab, ids_to_tokens, emoji):
420
+ self.vocab = vocab # same as swe
421
+ self.ids_to_tokens = ids_to_tokens # same as bpe
422
+ self.emoji = emoji
423
+ self.maxlen = np.max([len(w) for w in self.vocab.keys()])
424
+ self.content_repatter1 = re.compile(r"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)")
425
+ self.content_repatter2 = re.compile(r"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*")
426
+ self.content_repatter3 = re.compile(r"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}")
427
+ self.content_repatter4 = re.compile(
428
+ r"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*"
429
+ )
430
+ self.content_repatter5 = re.compile(
431
+ r"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*"
432
+ )
433
+ self.content_repatter6 = re.compile(
434
+ r"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*"
435
+ )
436
+ keisen = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"
437
+ blocks = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"
438
+ self.content_trans1 = str.maketrans({k: "<BLOCK>" for k in keisen + blocks})
439
+
440
+ # Copied from tokenization_gpt_neox_japanese.SubWordJapaneseTokenizer.__len__
441
+ def __len__(self):
442
+ return len(self.ids_to_tokens)
443
+
444
+ # Copied from tokenization_gpt_neox_japanese.SubWordJapaneseTokenizer.clean_text
445
+ def clean_text(self, content):
446
+ content = self.content_repatter1.sub("<URL>", content)
447
+ content = self.content_repatter2.sub("<EMAIL>", content)
448
+ content = self.content_repatter3.sub("<TEL>", content)
449
+ content = self.content_repatter4.sub("<DATE>", content)
450
+ content = self.content_repatter5.sub("<DATE>", content)
451
+ content = self.content_repatter6.sub("<PRICE>", content)
452
+ content = content.translate(self.content_trans1)
453
+ while "<BLOCK><BLOCK>" in content:
454
+ content = content.replace("<BLOCK><BLOCK>", "<BLOCK>")
455
+ return content
456
+
457
+ # Copied from tokenization_gpt_neox_japanese.SubWordJapaneseTokenizer.tokenize
458
+ def tokenize(self, text, clean=False):
459
+ text = text.replace(" ", "<SP>")
460
+ text = text.replace(" ", "<SP>")
461
+ text = text.replace("\r\n", "<BR>")
462
+ text = text.replace("\n", "<BR>")
463
+ text = text.replace("\r", "<BR>")
464
+ text = text.replace("\t", "<TAB>")
465
+ text = text.replace("—", "ー")
466
+ text = text.replace("−", "ー")
467
+ for k, v in self.emoji["emoji"].items():
468
+ if k in text:
469
+ text = text.replace(k, v)
470
+ if clean:
471
+ text = self.clean_text(text)
472
+
473
+ def check_simbol(x):
474
+ e = x.encode()
475
+ if len(x) == 1 and len(e) == 2:
476
+ c = (int(e[0]) << 8) + int(e[1])
477
+ if (
478
+ (c >= 0xC2A1 and c <= 0xC2BF)
479
+ or (c >= 0xC780 and c <= 0xC783)
480
+ or (c >= 0xCAB9 and c <= 0xCBBF)
481
+ or (c >= 0xCC80 and c <= 0xCDA2)
482
+ ):
483
+ return True
484
+ return False
485
+
486
+ def checku2e(x):
487
+ e = x.encode()
488
+ if len(x) == 1 and len(e) == 3:
489
+ c = (int(e[0]) << 16) + (int(e[1]) << 8) + int(e[2])
490
+ if c >= 0xE28080 and c <= 0xE2B07F:
491
+ return True
492
+ return False
493
+
494
+ pos = 0
495
+ result = []
496
+ while pos < len(text):
497
+ end = min(len(text), pos + self.maxlen + 1) if text[pos] == "<" else pos + 3
498
+ candidates = [] # (token_id, token, pos)
499
+ for e in range(end, pos, -1):
500
+ wd = text[pos:e]
501
+ if wd in self.vocab:
502
+ if wd[0] == "<" and len(wd) > 2:
503
+ candidates = [(self.vocab[wd], wd, e)]
504
+ break
505
+ else:
506
+ candidates.append((self.vocab[wd], wd, e))
507
+ if len(candidates) > 0:
508
+ # the smallest token_id is adopted
509
+ _, wd, e = sorted(candidates, key=lambda x: x[0])[0]
510
+ result.append(wd)
511
+ pos = e
512
+ else:
513
+ end = pos + 1
514
+ wd = text[pos:end]
515
+ if check_simbol(wd):
516
+ result.append("<KIGOU>")
517
+ elif checku2e(wd):
518
+ result.append("<U2000U2BFF>")
519
+ else:
520
+ for i in wd.encode("utf-8"):
521
+ result.append("<|byte%d|>" % i)
522
+ pos = end
523
+ return result
524
+
525
+ def convert_id_to_token(self, index):
526
+ return self.ids_to_tokens[index][0]