applied-ai-018 committed on
Commit a59e7d4 · verified · 1 Parent(s): 8de9c60

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. ckpts/universal/global_step20/zero/18.post_attention_layernorm.weight/exp_avg.pt +3 -0
  2. ckpts/universal/global_step20/zero/4.mlp.dense_4h_to_h.weight/exp_avg_sq.pt +3 -0
  3. ckpts/universal/global_step20/zero/4.mlp.dense_4h_to_h.weight/fp32.pt +3 -0
  4. lm-evaluation-harness/wandb/run-20240514_114444-rasgu64a/files/config.yaml +43 -0
  5. lm-evaluation-harness/wandb/run-20240514_114444-rasgu64a/files/output.log +28 -0
  6. lm-evaluation-harness/wandb/run-20240514_114444-rasgu64a/files/requirements.txt +163 -0
  7. lm-evaluation-harness/wandb/run-20240514_114444-rasgu64a/files/wandb-metadata.json +810 -0
  8. lm-evaluation-harness/wandb/run-20240514_114444-rasgu64a/files/wandb-summary.json +1 -0
  9. lm-evaluation-harness/wandb/run-20240514_114444-rasgu64a/logs/debug-internal.log +194 -0
  10. lm-evaluation-harness/wandb/run-20240514_114444-rasgu64a/logs/debug.log +29 -0
  11. lm-evaluation-harness/wandb/run-20240514_114444-rasgu64a/run-rasgu64a.wandb +0 -0
  12. lm-evaluation-harness/wandb/run-20240523_061244-lrp73hbe/files/output.log +34 -0
  13. lm-evaluation-harness/wandb/run-20240523_061244-lrp73hbe/files/wandb-summary.json +1 -0
  14. lm-evaluation-harness/wandb/run-20240523_061244-lrp73hbe/run-lrp73hbe.wandb +0 -0
  15. lm-evaluation-harness/wandb/run-20240523_130407-wvnshpcy/files/config.yaml +43 -0
  16. lm-evaluation-harness/wandb/run-20240523_130407-wvnshpcy/files/output.log +34 -0
  17. lm-evaluation-harness/wandb/run-20240523_130407-wvnshpcy/files/wandb-metadata.json +850 -0
  18. lm-evaluation-harness/wandb/run-20240608_190333-82mnef5m/files/config.yaml +375 -0
  19. lm-evaluation-harness/wandb/run-20240608_190333-82mnef5m/files/media/table/evaluation/eval_results_1_fd1718bec4834f9c9150.table.json +1 -0
  20. lm-evaluation-harness/wandb/run-20240608_190333-82mnef5m/files/output.log +744 -0
  21. lm-evaluation-harness/wandb/run-20240608_190333-82mnef5m/files/requirements.txt +154 -0
  22. lm-evaluation-harness/wandb/run-20240608_190333-82mnef5m/files/wandb-metadata.json +850 -0
  23. lm-evaluation-harness/wandb/run-20240608_190333-82mnef5m/files/wandb-summary.json +1 -0
  24. lm-evaluation-harness/wandb/run-20240608_190333-82mnef5m/logs/debug-internal.log +0 -0
  25. lm-evaluation-harness/wandb/run-20240608_190333-82mnef5m/logs/debug.log +36 -0
  26. venv/lib/python3.10/site-packages/transformers/models/flava/__init__.py +97 -0
  27. venv/lib/python3.10/site-packages/transformers/models/flava/__pycache__/__init__.cpython-310.pyc +0 -0
  28. venv/lib/python3.10/site-packages/transformers/models/flava/__pycache__/configuration_flava.cpython-310.pyc +0 -0
  29. venv/lib/python3.10/site-packages/transformers/models/flava/__pycache__/convert_dalle_to_flava_codebook.cpython-310.pyc +0 -0
  30. venv/lib/python3.10/site-packages/transformers/models/flava/__pycache__/convert_flava_original_pytorch_to_hf.cpython-310.pyc +0 -0
  31. venv/lib/python3.10/site-packages/transformers/models/flava/__pycache__/feature_extraction_flava.cpython-310.pyc +0 -0
  32. venv/lib/python3.10/site-packages/transformers/models/flava/__pycache__/image_processing_flava.cpython-310.pyc +0 -0
  33. venv/lib/python3.10/site-packages/transformers/models/flava/__pycache__/modeling_flava.cpython-310.pyc +0 -0
  34. venv/lib/python3.10/site-packages/transformers/models/flava/__pycache__/processing_flava.cpython-310.pyc +0 -0
  35. venv/lib/python3.10/site-packages/transformers/models/flava/configuration_flava.py +764 -0
  36. venv/lib/python3.10/site-packages/transformers/models/flava/convert_dalle_to_flava_codebook.py +102 -0
  37. venv/lib/python3.10/site-packages/transformers/models/flava/convert_flava_original_pytorch_to_hf.py +99 -0
  38. venv/lib/python3.10/site-packages/transformers/models/flava/feature_extraction_flava.py +33 -0
  39. venv/lib/python3.10/site-packages/transformers/models/flava/image_processing_flava.py +738 -0
  40. venv/lib/python3.10/site-packages/transformers/models/flava/modeling_flava.py +2098 -0
  41. venv/lib/python3.10/site-packages/transformers/models/flava/processing_flava.py +165 -0
  42. venv/lib/python3.10/site-packages/transformers/models/led/__init__.py +101 -0
  43. venv/lib/python3.10/site-packages/transformers/models/led/__pycache__/__init__.cpython-310.pyc +0 -0
  44. venv/lib/python3.10/site-packages/transformers/models/led/__pycache__/configuration_led.cpython-310.pyc +0 -0
  45. venv/lib/python3.10/site-packages/transformers/models/led/__pycache__/modeling_led.cpython-310.pyc +0 -0
  46. venv/lib/python3.10/site-packages/transformers/models/led/__pycache__/modeling_tf_led.cpython-310.pyc +0 -0
  47. venv/lib/python3.10/site-packages/transformers/models/led/__pycache__/tokenization_led.cpython-310.pyc +0 -0
  48. venv/lib/python3.10/site-packages/transformers/models/led/__pycache__/tokenization_led_fast.cpython-310.pyc +0 -0
  49. venv/lib/python3.10/site-packages/transformers/models/led/configuration_led.py +165 -0
  50. venv/lib/python3.10/site-packages/transformers/models/led/modeling_led.py +0 -0
ckpts/universal/global_step20/zero/18.post_attention_layernorm.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b68e80fc2874b1478eece6e64ef78a618d609de49afe97a64f4bfde114f8b14d
+ size 9372
ckpts/universal/global_step20/zero/4.mlp.dense_4h_to_h.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5ed253e63885ca37c847510ebfc2afa69c203b2413dbc3a97a75cdf5947ccd43
+ size 33555627
ckpts/universal/global_step20/zero/4.mlp.dense_4h_to_h.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f773d17a84e0b89a11e905db09004ed983a4126be16204874aa0dfbaee0addc8
+ size 33555533
lm-evaluation-harness/wandb/run-20240514_114444-rasgu64a/files/config.yaml ADDED
@@ -0,0 +1,43 @@
+ wandb_version: 1
+
+ _wandb:
+ desc: null
+ value:
+ python_version: 3.10.12
+ cli_version: 0.17.0
+ framework: huggingface
+ huggingface_version: 4.40.2
+ is_jupyter_run: false
+ is_kaggle_kernel: false
+ start_time: 1715687084
+ t:
+ 1:
+ - 1
+ - 5
+ - 11
+ - 49
+ - 51
+ - 53
+ - 55
+ - 71
+ - 98
+ - 100
+ 2:
+ - 1
+ - 5
+ - 11
+ - 49
+ - 51
+ - 53
+ - 55
+ - 71
+ - 98
+ - 100
+ 3:
+ - 23
+ 4: 3.10.12
+ 5: 0.17.0
+ 6: 4.40.2
+ 8:
+ - 5
+ 13: linux-x86_64
lm-evaluation-harness/wandb/run-20240514_114444-rasgu64a/files/output.log ADDED
@@ -0,0 +1,28 @@
+
+ 2024-05-14:11:44:44,636 INFO [__main__.py:251] Verbosity set to INFO
+ 2024-05-14:11:44:49,674 INFO [__main__.py:335] Selected Tasks: ['indiccopa-hi']
+ 2024-05-14:11:44:49,676 INFO [evaluator.py:131] Setting random seed to 0 | Setting numpy seed to 1234 | Setting torch manual seed to 1234
+ 2024-05-14:11:44:49,676 INFO [evaluator.py:177] Initializing hf model, with arguments: {'pretrained': '/data/cronscript/ckpts//hf_ckpt//global_step100'}
+ /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/gpu_migration/core/register.py:145: UserWarning: "hpu:X" notation is not supported by Gaudi PyTorch intergration bridge. Please change to "hpu" without index (Triggered internally at /npu-stack/pytorch-integration/pytorch_helpers/lazy_to_backend.cpp:53.)
+ return func(*args, **kwargs)
+ /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/gpu_migration/torch/cuda/memory.py:36: UserWarning: No need to call empty_cache on HPU. It manages the memory internally in an effcient way.
+ warnings.warn(
+ /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead.
+ warnings.warn(
+ You are using the default legacy behaviour of the <class 'transformers.models.llama.tokenization_llama.LlamaTokenizer'>. This is expected, and simply means that the `legacy` (previous) behavior will be used so nothing changes for you. If you want to use the new behaviour, set `legacy=False`. This should only be set if you understand what it means, and thoroughly read the reason why this was added as explained in https://github.com/huggingface/transformers/pull/24565
+ 2024-05-14:11:44:58,230 WARNING [task.py:763] [Task: indiccopa-hi] metric acc is defined, but aggregation is not. using default aggregation=mean
+ 2024-05-14:11:44:58,230 WARNING [task.py:775] [Task: indiccopa-hi] metric acc is defined, but higher_is_better is not. using default higher_is_better=True
+ [2024-05-14 11:44:57,815] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect)
+ /usr/local/lib/python3.10/dist-packages/datasets/load.py:1486: FutureWarning: The repository for ai4bharat/IndicCOPA contains custom code which must be executed to correctly load the dataset. You can inspect the repository content at https://hf.co/datasets/ai4bharat/IndicCOPA
+ You can avoid this message in future by passing the argument `trust_remote_code=True`.
+ Passing `trust_remote_code=True` will be mandatory to load this dataset from the next major release of `datasets`.
+ warnings.warn(
+ 2024-05-14:11:45:03,691 WARNING [task.py:322] [Task: indiccopa-hi] has_training_docs and has_validation_docs are False, using test_docs as fewshot_docs but this is not recommended.
+ 2024-05-14:11:45:03,692 WARNING [task.py:322] [Task: indiccopa-hi] has_training_docs and has_validation_docs are False, using test_docs as fewshot_docs but this is not recommended.
+ 2024-05-14:11:45:03,711 INFO [task.py:395] Building contexts for indiccopa-hi on rank 2...
+ 100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 56/56 [00:00<00:00, 105469.70it/s]
+ 2024-05-14:11:45:05,430 INFO [evaluator.py:379] Running loglikelihood requests
+ /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/gpu_migration/torch/cuda/memory.py:36: UserWarning: No need to call empty_cache on HPU. It manages the memory internally in an effcient way.
+ warnings.warn(
+ Passed argument batch_size = auto:1. Detecting largest batch size
+ Determined largest batch size: 64
lm-evaluation-harness/wandb/run-20240514_114444-rasgu64a/files/requirements.txt ADDED
@@ -0,0 +1,163 @@
+ DataProperty==1.0.1
+ GitPython==3.1.43
+ Jinja2==3.1.3
+ Markdown==3.6
+ MarkupSafe==2.1.5
+ Pillow-SIMD==7.0.0.post3
+ PyYAML==6.0
+ Werkzeug==3.0.2
+ absl-py==2.1.0
+ accelerate==0.30.1
+ aiohttp==3.9.4
+ aiosignal==1.3.1
+ antlr4-python3-runtime==4.9.3
+ anyio==4.3.0
+ async-timeout==4.0.3
+ attrs==23.2.0
+ av==9.2.0
+ cachetools==5.3.3
+ certifi==2024.2.2
+ cffi==1.15.1
+ cfgv==3.4.0
+ chardet==5.2.0
+ charset-normalizer==3.3.2
+ click==8.1.7
+ cmake==3.29.2
+ colorama==0.4.6
+ datasets==2.19.1
+ deepspeed==0.12.4+hpu.synapse.v1.15.1
+ dill==0.3.8
+ distlib==0.3.8
+ distro==1.9.0
+ docker-pycreds==0.4.0
+ einops==0.8.0
+ evaluate==0.4.2
+ exceptiongroup==1.2.0
+ expecttest==0.2.1
+ filelock==3.13.4
+ frozenlist==1.4.1
+ fsspec==2024.3.1
+ gitdb==4.0.11
+ google-auth-oauthlib==0.4.6
+ google-auth==2.29.0
+ grpcio==1.62.1
+ h11==0.14.0
+ habana-media-loader==1.15.1.15
+ habana-pyhlml==1.15.1.15
+ habana-torch-dataloader==1.15.1.15
+ habana-torch-plugin==1.15.1.15
+ habana_gpu_migration==1.15.1.15
+ habana_quantization_toolkit==1.15.1.15
+ hjson==3.1.0
+ httpcore==1.0.5
+ httpx==0.27.0
+ huggingface-hub==0.23.0
+ identify==2.5.35
+ idna==3.7
+ importlib_resources==6.4.0
+ iniconfig==2.0.0
+ joblib==1.4.2
+ jsonlines==4.0.0
+ lightning-habana==1.4.0
+ lightning-utilities==0.11.2
+ lightning==2.2.0.post0
+ lm_eval==0.3.0
+ lm_eval==0.4.2
+ lm_eval==0.4.2
+ lm_eval==0.4.2
+ mbstrdecoder==1.1.3
+ more-itertools==10.2.0
+ mpi4py==3.1.4
+ mpmath==1.3.0
+ multidict==6.0.5
+ multiprocess==0.70.16
+ networkx==3.3
+ ninja==1.11.1.1
+ nltk==3.8.1
+ nodeenv==1.8.0
+ numexpr==2.10.0
+ numpy==1.23.5
+ oauthlib==3.2.2
+ omegaconf==2.3.0
+ openai==1.29.0
+ packaging==24.0
+ pandas==2.0.1
+ pathspec==0.12.1
+ pathvalidate==3.2.0
+ peft==0.10.0
+ perfetto==0.7.0
+ pip==22.0.2
+ pip==23.3.1
+ platformdirs==4.2.0
+ pluggy==1.4.0
+ portalocker==2.8.2
+ pre-commit==3.3.3
+ protobuf==3.20.3
+ psutil==5.9.8
+ py-cpuinfo==9.0.0
+ pyarrow-hotfix==0.6
+ pyarrow==16.0.0
+ pyasn1==0.6.0
+ pyasn1_modules==0.4.0
+ pybind11==2.10.4
+ pycountry==23.12.11
+ pycparser==2.22
+ pydantic==1.10.13
+ pynvml==8.0.4
+ pytablewriter==1.2.0
+ pytest==8.1.1
+ python-dateutil==2.9.0.post0
+ pytorch-lightning==2.2.2
+ pytz==2024.1
+ regex==2023.5.5
+ requests-oauthlib==2.0.0
+ requests==2.31.0
+ rouge_score==0.1.2
+ rsa==4.9
+ sacrebleu==1.5.0
+ safetensors==0.4.3
+ scikit-learn==1.4.2
+ scipy==1.13.0
+ sentencepiece==0.2.0
+ sentry-sdk==2.1.1
+ setproctitle==1.3.3
+ setuptools==59.6.0
+ setuptools==69.5.1
+ six==1.16.0
+ smmap==5.0.1
+ sniffio==1.3.1
+ sqlitedict==2.1.0
+ symengine==0.11.0
+ sympy==1.12
+ tabledata==1.3.3
+ tcolorpy==0.1.6
+ tdqm==0.0.1
+ tensorboard-data-server==0.6.1
+ tensorboard-plugin-wit==1.8.1
+ tensorboard==2.11.2
+ threadpoolctl==3.5.0
+ tokenizers==0.19.1
+ tomli==2.0.1
+ torch==2.2.0a0+git8964477
+ torch_tb_profiler==0.4.0
+ torchaudio==2.2.0+08901ad
+ torchdata==0.7.1+5e6f7b7
+ torchmetrics==1.3.2
+ torchtext==0.17.0+400da5c
+ torchvision==0.17.0+b2383d4
+ tqdm-multiprocess==0.0.11
+ tqdm==4.66.2
+ transformers==4.40.2
+ typepy==1.3.2
+ typing_extensions==4.11.0
+ tzdata==2024.1
+ urllib3==1.26.18
+ virtualenv==20.25.1
+ wandb==0.17.0
+ wheel==0.37.1
+ wheel==0.43.0
+ word2number==1.1
+ xxhash==3.4.1
+ yamllint==1.35.1
+ yarl==1.9.4
+ zstandard==0.22.0
lm-evaluation-harness/wandb/run-20240514_114444-rasgu64a/files/wandb-metadata.json ADDED
@@ -0,0 +1,810 @@
1
+ {
2
+ "os": "Linux-5.15.0-92-generic-x86_64-with-glibc2.35",
3
+ "python": "3.10.12",
4
+ "heartbeatAt": "2024-05-14T11:44:44.495966",
5
+ "startedAt": "2024-05-14T11:44:44.053364",
6
+ "docker": null,
7
+ "cuda": null,
8
+ "args": [
9
+ "--model",
10
+ "hf",
11
+ "--model_args",
12
+ "pretrained=/data/cronscript/ckpts//hf_ckpt//global_step100",
13
+ "--tasks",
14
+ "indiccopa-hi",
15
+ "--batch_size",
16
+ "auto",
17
+ "--wandb_args",
18
+ "project=bharatgpt,group=global_step100"
19
+ ],
20
+ "state": "running",
21
+ "program": "-m lm_eval.__main__",
22
+ "codePathLocal": null,
23
+ "git": {
24
+ "remote": "https://github.com/EleutherAI/lm-evaluation-harness",
25
+ "commit": null
26
+ },
27
+ "email": null,
28
+ "root": "/data/cronscript/lm-evaluation-harness",
29
+ "host": "vizzhy-150-3",
30
+ "username": "root",
31
+ "executable": "/usr/bin/python3",
32
+ "cpu_count": 76,
33
+ "cpu_count_logical": 152,
34
+ "cpu_freq": {
35
+ "current": 3392.622177631579,
36
+ "min": 800.0,
37
+ "max": 3400.0
38
+ },
39
+ "cpu_freq_per_core": [
40
+ {
41
+ "current": 3300.0,
42
+ "min": 800.0,
43
+ "max": 3400.0
44
+ },
45
+ {
46
+ "current": 3300.0,
47
+ "min": 800.0,
48
+ "max": 3400.0
49
+ },
50
+ {
51
+ "current": 3400.0,
52
+ "min": 800.0,
53
+ "max": 3400.0
54
+ },
55
+ {
56
+ "current": 3400.0,
57
+ "min": 800.0,
58
+ "max": 3400.0
59
+ },
60
+ {
61
+ "current": 3400.0,
62
+ "min": 800.0,
63
+ "max": 3400.0
64
+ },
65
+ {
66
+ "current": 3400.0,
67
+ "min": 800.0,
68
+ "max": 3400.0
69
+ },
70
+ {
71
+ "current": 3400.0,
72
+ "min": 800.0,
73
+ "max": 3400.0
74
+ },
75
+ {
76
+ "current": 3400.0,
77
+ "min": 800.0,
78
+ "max": 3400.0
79
+ },
80
+ {
81
+ "current": 3400.0,
82
+ "min": 800.0,
83
+ "max": 3400.0
84
+ },
85
+ {
86
+ "current": 3400.0,
87
+ "min": 800.0,
88
+ "max": 3400.0
89
+ },
90
+ {
91
+ "current": 3400.0,
92
+ "min": 800.0,
93
+ "max": 3400.0
94
+ },
95
+ {
96
+ "current": 3400.0,
97
+ "min": 800.0,
98
+ "max": 3400.0
99
+ },
100
+ {
101
+ "current": 3400.0,
102
+ "min": 800.0,
103
+ "max": 3400.0
104
+ },
105
+ {
106
+ "current": 3400.0,
107
+ "min": 800.0,
108
+ "max": 3400.0
109
+ },
110
+ {
111
+ "current": 3300.0,
112
+ "min": 800.0,
113
+ "max": 3400.0
114
+ },
115
+ {
116
+ "current": 3400.0,
117
+ "min": 800.0,
118
+ "max": 3400.0
119
+ },
120
+ {
121
+ "current": 3400.0,
122
+ "min": 800.0,
123
+ "max": 3400.0
124
+ },
125
+ {
126
+ "current": 3400.0,
127
+ "min": 800.0,
128
+ "max": 3400.0
129
+ },
130
+ {
131
+ "current": 3400.0,
132
+ "min": 800.0,
133
+ "max": 3400.0
134
+ },
135
+ {
136
+ "current": 3300.0,
137
+ "min": 800.0,
138
+ "max": 3400.0
139
+ },
140
+ {
141
+ "current": 3400.0,
142
+ "min": 800.0,
143
+ "max": 3400.0
144
+ },
145
+ {
146
+ "current": 3400.0,
147
+ "min": 800.0,
148
+ "max": 3400.0
149
+ },
150
+ {
151
+ "current": 3400.0,
152
+ "min": 800.0,
153
+ "max": 3400.0
154
+ },
155
+ {
156
+ "current": 3400.0,
157
+ "min": 800.0,
158
+ "max": 3400.0
159
+ },
160
+ {
161
+ "current": 3400.0,
162
+ "min": 800.0,
163
+ "max": 3400.0
164
+ },
165
+ {
166
+ "current": 3400.0,
167
+ "min": 800.0,
168
+ "max": 3400.0
169
+ },
170
+ {
171
+ "current": 3400.0,
172
+ "min": 800.0,
173
+ "max": 3400.0
174
+ },
175
+ {
176
+ "current": 3400.0,
177
+ "min": 800.0,
178
+ "max": 3400.0
179
+ },
180
+ {
181
+ "current": 3400.0,
182
+ "min": 800.0,
183
+ "max": 3400.0
184
+ },
185
+ {
186
+ "current": 3400.0,
187
+ "min": 800.0,
188
+ "max": 3400.0
189
+ },
190
+ {
191
+ "current": 3400.0,
192
+ "min": 800.0,
193
+ "max": 3400.0
194
+ },
195
+ {
196
+ "current": 3400.0,
197
+ "min": 800.0,
198
+ "max": 3400.0
199
+ },
200
+ {
201
+ "current": 3400.0,
202
+ "min": 800.0,
203
+ "max": 3400.0
204
+ },
205
+ {
206
+ "current": 3400.0,
207
+ "min": 800.0,
208
+ "max": 3400.0
209
+ },
210
+ {
211
+ "current": 3400.0,
212
+ "min": 800.0,
213
+ "max": 3400.0
214
+ },
215
+ {
216
+ "current": 3400.0,
217
+ "min": 800.0,
218
+ "max": 3400.0
219
+ },
220
+ {
221
+ "current": 3400.0,
222
+ "min": 800.0,
223
+ "max": 3400.0
224
+ },
225
+ {
226
+ "current": 3400.0,
227
+ "min": 800.0,
228
+ "max": 3400.0
229
+ },
230
+ {
231
+ "current": 3300.0,
232
+ "min": 800.0,
233
+ "max": 3400.0
234
+ },
235
+ {
236
+ "current": 3300.0,
237
+ "min": 800.0,
238
+ "max": 3400.0
239
+ },
240
+ {
241
+ "current": 3300.0,
242
+ "min": 800.0,
243
+ "max": 3400.0
244
+ },
245
+ {
246
+ "current": 3400.0,
247
+ "min": 800.0,
248
+ "max": 3400.0
249
+ },
250
+ {
251
+ "current": 3400.0,
252
+ "min": 800.0,
253
+ "max": 3400.0
254
+ },
255
+ {
256
+ "current": 3400.0,
257
+ "min": 800.0,
258
+ "max": 3400.0
259
+ },
260
+ {
261
+ "current": 3400.0,
262
+ "min": 800.0,
263
+ "max": 3400.0
264
+ },
265
+ {
266
+ "current": 3400.0,
267
+ "min": 800.0,
268
+ "max": 3400.0
269
+ },
270
+ {
271
+ "current": 3400.0,
272
+ "min": 800.0,
273
+ "max": 3400.0
274
+ },
275
+ {
276
+ "current": 3400.0,
277
+ "min": 800.0,
278
+ "max": 3400.0
279
+ },
280
+ {
281
+ "current": 3400.0,
282
+ "min": 800.0,
283
+ "max": 3400.0
284
+ },
285
+ {
286
+ "current": 3400.0,
287
+ "min": 800.0,
288
+ "max": 3400.0
289
+ },
290
+ {
291
+ "current": 3400.0,
292
+ "min": 800.0,
293
+ "max": 3400.0
294
+ },
295
+ {
296
+ "current": 3400.0,
297
+ "min": 800.0,
298
+ "max": 3400.0
299
+ },
300
+ {
301
+ "current": 3400.0,
302
+ "min": 800.0,
303
+ "max": 3400.0
304
+ },
305
+ {
306
+ "current": 3400.0,
307
+ "min": 800.0,
308
+ "max": 3400.0
309
+ },
310
+ {
311
+ "current": 3400.0,
312
+ "min": 800.0,
313
+ "max": 3400.0
314
+ },
315
+ {
316
+ "current": 3400.0,
317
+ "min": 800.0,
318
+ "max": 3400.0
319
+ },
320
+ {
321
+ "current": 3400.0,
322
+ "min": 800.0,
323
+ "max": 3400.0
324
+ },
325
+ {
326
+ "current": 3300.0,
327
+ "min": 800.0,
328
+ "max": 3400.0
329
+ },
330
+ {
331
+ "current": 3400.0,
332
+ "min": 800.0,
333
+ "max": 3400.0
334
+ },
335
+ {
336
+ "current": 3307.425,
337
+ "min": 800.0,
338
+ "max": 3400.0
339
+ },
340
+ {
341
+ "current": 3400.0,
342
+ "min": 800.0,
343
+ "max": 3400.0
344
+ },
345
+ {
346
+ "current": 3400.0,
347
+ "min": 800.0,
348
+ "max": 3400.0
349
+ },
350
+ {
351
+ "current": 3400.0,
352
+ "min": 800.0,
353
+ "max": 3400.0
354
+ },
355
+ {
356
+ "current": 3400.0,
357
+ "min": 800.0,
358
+ "max": 3400.0
359
+ },
360
+ {
361
+ "current": 3400.0,
362
+ "min": 800.0,
363
+ "max": 3400.0
364
+ },
365
+ {
366
+ "current": 3400.0,
367
+ "min": 800.0,
368
+ "max": 3400.0
369
+ },
370
+ {
371
+ "current": 3400.0,
372
+ "min": 800.0,
373
+ "max": 3400.0
374
+ },
375
+ {
376
+ "current": 3400.0,
377
+ "min": 800.0,
378
+ "max": 3400.0
379
+ },
380
+ {
381
+ "current": 3400.0,
382
+ "min": 800.0,
383
+ "max": 3400.0
384
+ },
385
+ {
386
+ "current": 3400.0,
387
+ "min": 800.0,
388
+ "max": 3400.0
389
+ },
390
+ {
391
+ "current": 3400.0,
392
+ "min": 800.0,
393
+ "max": 3400.0
394
+ },
395
+ {
396
+ "current": 3400.0,
397
+ "min": 800.0,
398
+ "max": 3400.0
399
+ },
400
+ {
401
+ "current": 3400.0,
402
+ "min": 800.0,
403
+ "max": 3400.0
404
+ },
405
+ {
406
+ "current": 3400.0,
407
+ "min": 800.0,
408
+ "max": 3400.0
409
+ },
410
+ {
411
+ "current": 3400.0,
412
+ "min": 800.0,
413
+ "max": 3400.0
414
+ },
415
+ {
416
+ "current": 3400.0,
417
+ "min": 800.0,
418
+ "max": 3400.0
419
+ },
420
+ {
421
+ "current": 3400.0,
422
+ "min": 800.0,
423
+ "max": 3400.0
424
+ },
425
+ {
426
+ "current": 3400.0,
427
+ "min": 800.0,
428
+ "max": 3400.0
429
+ },
430
+ {
431
+ "current": 3400.0,
432
+ "min": 800.0,
433
+ "max": 3400.0
434
+ },
435
+ {
436
+ "current": 3400.0,
437
+ "min": 800.0,
438
+ "max": 3400.0
439
+ },
440
+ {
441
+ "current": 3400.0,
442
+ "min": 800.0,
443
+ "max": 3400.0
444
+ },
445
+ {
446
+ "current": 3400.0,
447
+ "min": 800.0,
448
+ "max": 3400.0
449
+ },
450
+ {
451
+ "current": 3400.0,
452
+ "min": 800.0,
453
+ "max": 3400.0
454
+ },
455
+ {
456
+ "current": 3400.0,
457
+ "min": 800.0,
458
+ "max": 3400.0
459
+ },
460
+ {
461
+ "current": 3400.0,
462
+ "min": 800.0,
463
+ "max": 3400.0
464
+ },
465
+ {
466
+ "current": 3400.0,
467
+ "min": 800.0,
468
+ "max": 3400.0
469
+ },
470
+ {
471
+ "current": 3400.0,
472
+ "min": 800.0,
473
+ "max": 3400.0
474
+ },
475
+ {
476
+ "current": 3400.0,
477
+ "min": 800.0,
478
+ "max": 3400.0
479
+ },
480
+ {
481
+ "current": 3400.0,
482
+ "min": 800.0,
483
+ "max": 3400.0
484
+ },
485
+ {
486
+ "current": 3400.0,
487
+ "min": 800.0,
488
+ "max": 3400.0
489
+ },
490
+ {
491
+ "current": 3400.0,
492
+ "min": 800.0,
493
+ "max": 3400.0
494
+ },
495
+ {
496
+ "current": 3400.0,
497
+ "min": 800.0,
498
+ "max": 3400.0
499
+ },
500
+ {
501
+ "current": 3400.0,
502
+ "min": 800.0,
503
+ "max": 3400.0
504
+ },
505
+ {
506
+ "current": 3400.0,
507
+ "min": 800.0,
508
+ "max": 3400.0
509
+ },
510
+ {
511
+ "current": 3400.0,
512
+ "min": 800.0,
513
+ "max": 3400.0
514
+ },
515
+ {
516
+ "current": 3400.0,
517
+ "min": 800.0,
518
+ "max": 3400.0
519
+ },
520
+ {
521
+ "current": 3400.0,
522
+ "min": 800.0,
523
+ "max": 3400.0
524
+ },
525
+ {
526
+ "current": 3400.0,
527
+ "min": 800.0,
528
+ "max": 3400.0
529
+ },
530
+ {
531
+ "current": 3400.0,
532
+ "min": 800.0,
533
+ "max": 3400.0
534
+ },
535
+ {
536
+ "current": 3400.0,
537
+ "min": 800.0,
538
+ "max": 3400.0
539
+ },
540
+ {
541
+ "current": 3400.0,
542
+ "min": 800.0,
543
+ "max": 3400.0
544
+ },
545
+ {
546
+ "current": 3400.0,
547
+ "min": 800.0,
548
+ "max": 3400.0
549
+ },
550
+ {
551
+ "current": 3400.0,
552
+ "min": 800.0,
553
+ "max": 3400.0
554
+ },
555
+ {
556
+ "current": 3400.0,
557
+ "min": 800.0,
558
+ "max": 3400.0
559
+ },
560
+ {
561
+ "current": 3400.0,
562
+ "min": 800.0,
563
+ "max": 3400.0
564
+ },
565
+ {
566
+ "current": 3400.0,
567
+ "min": 800.0,
568
+ "max": 3400.0
569
+ },
570
+ {
571
+ "current": 3400.0,
572
+ "min": 800.0,
573
+ "max": 3400.0
574
+ },
575
+ {
576
+ "current": 3400.0,
577
+ "min": 800.0,
578
+ "max": 3400.0
579
+ },
580
+ {
581
+ "current": 3300.0,
582
+ "min": 800.0,
583
+ "max": 3400.0
584
+ },
585
+ {
586
+ "current": 3400.0,
587
+ "min": 800.0,
588
+ "max": 3400.0
589
+ },
590
+ {
591
+ "current": 3400.0,
592
+ "min": 800.0,
593
+ "max": 3400.0
594
+ },
595
+ {
596
+ "current": 3400.0,
597
+ "min": 800.0,
598
+ "max": 3400.0
599
+ },
600
+ {
601
+ "current": 3400.0,
602
+ "min": 800.0,
603
+ "max": 3400.0
604
+ },
605
+ {
606
+ "current": 3400.0,
607
+ "min": 800.0,
608
+ "max": 3400.0
609
+ },
610
+ {
611
+ "current": 3400.0,
612
+ "min": 800.0,
613
+ "max": 3400.0
614
+ },
615
+ {
616
+ "current": 3400.0,
617
+ "min": 800.0,
618
+ "max": 3400.0
619
+ },
620
+ {
621
+ "current": 3400.0,
622
+ "min": 800.0,
623
+ "max": 3400.0
624
+ },
625
+ {
626
+ "current": 3400.0,
627
+ "min": 800.0,
628
+ "max": 3400.0
629
+ },
630
+ {
631
+ "current": 3400.0,
632
+ "min": 800.0,
633
+ "max": 3400.0
634
+ },
635
+ {
636
+ "current": 3400.0,
637
+ "min": 800.0,
638
+ "max": 3400.0
639
+ },
640
+ {
641
+ "current": 3400.0,
642
+ "min": 800.0,
643
+ "max": 3400.0
644
+ },
645
+ {
646
+ "current": 3400.0,
647
+ "min": 800.0,
648
+ "max": 3400.0
649
+ },
650
+ {
651
+ "current": 3400.0,
652
+ "min": 800.0,
653
+ "max": 3400.0
654
+ },
655
+ {
656
+ "current": 3400.0,
657
+ "min": 800.0,
658
+ "max": 3400.0
659
+ },
660
+ {
661
+ "current": 3400.0,
662
+ "min": 800.0,
663
+ "max": 3400.0
664
+ },
665
+ {
666
+ "current": 3400.0,
667
+ "min": 800.0,
668
+ "max": 3400.0
669
+ },
670
+ {
671
+ "current": 3400.0,
672
+ "min": 800.0,
673
+ "max": 3400.0
674
+ },
675
+ {
676
+ "current": 3400.0,
677
+ "min": 800.0,
678
+ "max": 3400.0
679
+ },
680
+ {
681
+ "current": 3400.0,
682
+ "min": 800.0,
683
+ "max": 3400.0
684
+ },
685
+ {
686
+ "current": 3400.0,
687
+ "min": 800.0,
688
+ "max": 3400.0
689
+ },
690
+ {
691
+ "current": 3400.0,
692
+ "min": 800.0,
693
+ "max": 3400.0
694
+ },
695
+ {
696
+ "current": 3400.0,
697
+ "min": 800.0,
698
+ "max": 3400.0
699
+ },
700
+ {
701
+ "current": 3400.0,
702
+ "min": 800.0,
703
+ "max": 3400.0
704
+ },
705
+ {
706
+ "current": 3400.0,
707
+ "min": 800.0,
708
+ "max": 3400.0
709
+ },
710
+ {
711
+ "current": 3400.0,
712
+ "min": 800.0,
713
+ "max": 3400.0
714
+ },
715
+ {
716
+ "current": 3400.0,
717
+ "min": 800.0,
718
+ "max": 3400.0
719
+ },
720
+ {
721
+ "current": 3400.0,
722
+ "min": 800.0,
723
+ "max": 3400.0
724
+ },
725
+ {
726
+ "current": 3400.0,
727
+ "min": 800.0,
728
+ "max": 3400.0
729
+ },
730
+ {
731
+ "current": 3400.0,
732
+ "min": 800.0,
733
+ "max": 3400.0
734
+ },
735
+ {
736
+ "current": 3400.0,
737
+ "min": 800.0,
738
+ "max": 3400.0
739
+ },
740
+ {
741
+ "current": 3400.0,
742
+ "min": 800.0,
743
+ "max": 3400.0
744
+ },
745
+ {
746
+ "current": 3400.0,
747
+ "min": 800.0,
748
+ "max": 3400.0
749
+ },
750
+ {
751
+ "current": 3400.0,
752
+ "min": 800.0,
753
+ "max": 3400.0
754
+ },
755
+ {
756
+ "current": 3400.0,
757
+ "min": 800.0,
758
+ "max": 3400.0
759
+ },
760
+ {
761
+ "current": 3400.0,
762
+ "min": 800.0,
763
+ "max": 3400.0
764
+ },
765
+ {
766
+ "current": 3400.0,
767
+ "min": 800.0,
768
+ "max": 3400.0
769
+ },
770
+ {
771
+ "current": 3400.0,
772
+ "min": 800.0,
773
+ "max": 3400.0
774
+ },
775
+ {
776
+ "current": 3400.0,
777
+ "min": 800.0,
778
+ "max": 3400.0
779
+ },
780
+ {
781
+ "current": 3400.0,
782
+ "min": 800.0,
783
+ "max": 3400.0
784
+ },
785
+ {
786
+ "current": 3400.0,
787
+ "min": 800.0,
788
+ "max": 3400.0
789
+ },
790
+ {
791
+ "current": 3400.0,
792
+ "min": 800.0,
793
+ "max": 3400.0
794
+ },
795
+ {
796
+ "current": 3400.0,
797
+ "min": 800.0,
798
+ "max": 3400.0
799
+ }
800
+ ],
801
+ "disk": {
802
+ "/": {
803
+ "total": 866.4415092468262,
804
+ "used": 77.77701187133789
805
+ }
806
+ },
807
+ "memory": {
808
+ "total": 1007.5000267028809
809
+ }
810
+ }
lm-evaluation-harness/wandb/run-20240514_114444-rasgu64a/files/wandb-summary.json ADDED
@@ -0,0 +1 @@
+ {"_wandb": {"runtime": 30}}
lm-evaluation-harness/wandb/run-20240514_114444-rasgu64a/logs/debug-internal.log ADDED
@@ -0,0 +1,194 @@
1
+ 2024-05-14 11:44:44,070 INFO StreamThr :84108 [internal.py:wandb_internal():85] W&B internal server running at pid: 84108, started at: 2024-05-14 11:44:44.069840
2
+ 2024-05-14 11:44:44,072 DEBUG HandlerThread:84108 [handler.py:handle_request():158] handle_request: status
3
+ 2024-05-14 11:44:44,074 INFO WriterThread:84108 [datastore.py:open_for_write():87] open: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_114444-rasgu64a/run-rasgu64a.wandb
4
+ 2024-05-14 11:44:44,075 DEBUG SenderThread:84108 [sender.py:send():378] send: header
5
+ 2024-05-14 11:44:44,085 DEBUG SenderThread:84108 [sender.py:send():378] send: run
6
+ 2024-05-14 11:44:44,350 INFO SenderThread:84108 [dir_watcher.py:__init__():211] watching files in: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_114444-rasgu64a/files
7
+ 2024-05-14 11:44:44,350 INFO SenderThread:84108 [sender.py:_start_run_threads():1123] run started: rasgu64a with start time 1715687084.069144
8
+ 2024-05-14 11:44:44,361 DEBUG HandlerThread:84108 [handler.py:handle_request():158] handle_request: check_version
9
+ 2024-05-14 11:44:44,362 DEBUG SenderThread:84108 [sender.py:send_request():405] send_request: check_version
10
+ 2024-05-14 11:44:44,443 DEBUG HandlerThread:84108 [handler.py:handle_request():158] handle_request: run_start
11
+ 2024-05-14 11:44:44,445 DEBUG HandlerThread:84108 [system_info.py:__init__():26] System info init
12
+ 2024-05-14 11:44:44,445 DEBUG HandlerThread:84108 [system_info.py:__init__():41] System info init done
13
+ 2024-05-14 11:44:44,445 INFO HandlerThread:84108 [system_monitor.py:start():194] Starting system monitor
14
+ 2024-05-14 11:44:44,445 INFO SystemMonitor:84108 [system_monitor.py:_start():158] Starting system asset monitoring threads
15
+ 2024-05-14 11:44:44,445 INFO HandlerThread:84108 [system_monitor.py:probe():214] Collecting system info
16
+ 2024-05-14 11:44:44,446 INFO SystemMonitor:84108 [interfaces.py:start():188] Started cpu monitoring
17
+ 2024-05-14 11:44:44,446 INFO SystemMonitor:84108 [interfaces.py:start():188] Started disk monitoring
18
+ 2024-05-14 11:44:44,446 INFO SystemMonitor:84108 [interfaces.py:start():188] Started memory monitoring
19
+ 2024-05-14 11:44:44,447 INFO SystemMonitor:84108 [interfaces.py:start():188] Started network monitoring
20
+ 2024-05-14 11:44:44,495 DEBUG HandlerThread:84108 [system_info.py:probe():150] Probing system
21
+ 2024-05-14 11:44:44,504 DEBUG HandlerThread:84108 [system_info.py:_probe_git():135] Probing git
22
+ 2024-05-14 11:44:44,524 ERROR HandlerThread:84108 [gitlib.py:root():92] git root error: Cmd('git') failed due to: exit code(128)
23
+ cmdline: git rev-parse --show-toplevel
24
+ stderr: 'fatal: detected dubious ownership in repository at '/data/cronscript/lm-evaluation-harness'
25
+ To add an exception for this directory, call:
26
+
27
+ git config --global --add safe.directory /data/cronscript/lm-evaluation-harness'
28
+ 2024-05-14 11:44:44,524 DEBUG HandlerThread:84108 [system_info.py:_probe_git():143] Probing git done
29
+ 2024-05-14 11:44:44,524 DEBUG HandlerThread:84108 [system_info.py:probe():198] Probing system done
30
+ 2024-05-14 11:44:44,524 DEBUG HandlerThread:84108 [system_monitor.py:probe():223] {'os': 'Linux-5.15.0-92-generic-x86_64-with-glibc2.35', 'python': '3.10.12', 'heartbeatAt': '2024-05-14T11:44:44.495966', 'startedAt': '2024-05-14T11:44:44.053364', 'docker': None, 'cuda': None, 'args': ('--model', 'hf', '--model_args', 'pretrained=/data/cronscript/ckpts//hf_ckpt//global_step100', '--tasks', 'indiccopa-hi', '--batch_size', 'auto', '--wandb_args', 'project=bharatgpt,group=global_step100'), 'state': 'running', 'program': '-m lm_eval.__main__', 'codePathLocal': None, 'git': {'remote': 'https://github.com/EleutherAI/lm-evaluation-harness', 'commit': None}, 'email': None, 'root': '/data/cronscript/lm-evaluation-harness', 'host': 'vizzhy-150-3', 'username': 'root', 'executable': '/usr/bin/python3', 'cpu_count': 76, 'cpu_count_logical': 152, 'cpu_freq': {'current': 3392.622177631579, 'min': 800.0, 'max': 3400.0}, 'cpu_freq_per_core': [{'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 
3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3307.425, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 
3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}], 'disk': {'/': {'total': 866.4415092468262, 'used': 77.77701187133789}}, 'memory': {'total': 1007.5000267028809}}
31
+ 2024-05-14 11:44:44,524 INFO HandlerThread:84108 [system_monitor.py:probe():224] Finished collecting system info
32
+ 2024-05-14 11:44:44,524 INFO HandlerThread:84108 [system_monitor.py:probe():227] Publishing system info
33
+ 2024-05-14 11:44:44,525 INFO HandlerThread:84108 [system_monitor.py:probe():229] Finished publishing system info
34
+ 2024-05-14 11:44:44,529 DEBUG SenderThread:84108 [sender.py:send():378] send: files
35
+ 2024-05-14 11:44:44,529 INFO SenderThread:84108 [sender.py:_save_file():1389] saving file wandb-metadata.json with policy now
36
+ 2024-05-14 11:44:44,630 DEBUG HandlerThread:84108 [handler.py:handle_request():158] handle_request: python_packages
37
+ 2024-05-14 11:44:44,630 DEBUG SenderThread:84108 [sender.py:send_request():405] send_request: python_packages
38
+ 2024-05-14 11:44:44,630 DEBUG HandlerThread:84108 [handler.py:handle_request():158] handle_request: stop_status
39
+ 2024-05-14 11:44:44,631 DEBUG SenderThread:84108 [sender.py:send_request():405] send_request: stop_status
40
+ 2024-05-14 11:44:44,765 DEBUG SenderThread:84108 [sender.py:send():378] send: telemetry
41
+ 2024-05-14 11:44:45,083 INFO wandb-upload_0:84108 [upload_job.py:push():130] Uploaded file /tmp/tmpvib5ewidwandb/14p8uwnu-wandb-metadata.json
42
+ 2024-05-14 11:44:45,352 INFO Thread-12 :84108 [dir_watcher.py:_on_file_created():271] file/dir created: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_114444-rasgu64a/files/requirements.txt
43
+ 2024-05-14 11:44:45,352 INFO Thread-12 :84108 [dir_watcher.py:_on_file_created():271] file/dir created: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_114444-rasgu64a/files/output.log
44
+ 2024-05-14 11:44:45,353 INFO Thread-12 :84108 [dir_watcher.py:_on_file_created():271] file/dir created: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_114444-rasgu64a/files/wandb-metadata.json
45
+ 2024-05-14 11:44:47,355 INFO Thread-12 :84108 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_114444-rasgu64a/files/output.log
46
+ 2024-05-14 11:44:49,675 DEBUG HandlerThread:84108 [handler.py:handle_request():158] handle_request: status_report
47
+ 2024-05-14 11:44:51,377 INFO Thread-12 :84108 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_114444-rasgu64a/files/output.log
48
+ 2024-05-14 11:44:54,677 DEBUG HandlerThread:84108 [handler.py:handle_request():158] handle_request: status_report
49
+ 2024-05-14 11:44:57,386 INFO Thread-12 :84108 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_114444-rasgu64a/files/output.log
50
+ 2024-05-14 11:44:59,404 INFO Thread-12 :84108 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_114444-rasgu64a/files/output.log
51
+ 2024-05-14 11:44:59,631 DEBUG HandlerThread:84108 [handler.py:handle_request():158] handle_request: stop_status
52
+ 2024-05-14 11:44:59,631 DEBUG SenderThread:84108 [sender.py:send_request():405] send_request: stop_status
53
+ 2024-05-14 11:44:59,716 DEBUG HandlerThread:84108 [handler.py:handle_request():158] handle_request: status_report
54
+ 2024-05-14 11:45:00,405 INFO Thread-12 :84108 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_114444-rasgu64a/files/output.log
55
+ 2024-05-14 11:45:04,730 DEBUG HandlerThread:84108 [handler.py:handle_request():158] handle_request: status_report
56
+ 2024-05-14 11:45:05,411 INFO Thread-12 :84108 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_114444-rasgu64a/files/output.log
57
+ 2024-05-14 11:45:07,421 INFO Thread-12 :84108 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_114444-rasgu64a/files/output.log
58
+ 2024-05-14 11:45:08,422 INFO Thread-12 :84108 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_114444-rasgu64a/files/output.log
59
+ 2024-05-14 11:45:09,987 DEBUG HandlerThread:84108 [handler.py:handle_request():158] handle_request: status_report
60
+ 2024-05-14 11:45:14,524 DEBUG SenderThread:84108 [sender.py:send():378] send: exit
61
+ 2024-05-14 11:45:14,524 INFO SenderThread:84108 [sender.py:send_exit():585] handling exit code: 0
62
+ 2024-05-14 11:45:14,524 INFO SenderThread:84108 [sender.py:send_exit():587] handling runtime: 30
63
+ 2024-05-14 11:45:14,525 INFO SenderThread:84108 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end
64
+ 2024-05-14 11:45:14,526 INFO SenderThread:84108 [sender.py:send_exit():593] send defer
65
+ 2024-05-14 11:45:14,526 DEBUG HandlerThread:84108 [handler.py:handle_request():158] handle_request: defer
66
+ 2024-05-14 11:45:14,526 INFO HandlerThread:84108 [handler.py:handle_request_defer():184] handle defer: 0
67
+ 2024-05-14 11:45:14,526 DEBUG SenderThread:84108 [sender.py:send_request():405] send_request: defer
68
+ 2024-05-14 11:45:14,526 INFO SenderThread:84108 [sender.py:send_request_defer():609] handle sender defer: 0
69
+ 2024-05-14 11:45:14,526 INFO SenderThread:84108 [sender.py:transition_state():613] send defer: 1
70
+ 2024-05-14 11:45:14,526 DEBUG HandlerThread:84108 [handler.py:handle_request():158] handle_request: defer
71
+ 2024-05-14 11:45:14,526 INFO HandlerThread:84108 [handler.py:handle_request_defer():184] handle defer: 1
72
+ 2024-05-14 11:45:14,527 DEBUG SenderThread:84108 [sender.py:send_request():405] send_request: defer
73
+ 2024-05-14 11:45:14,527 INFO SenderThread:84108 [sender.py:send_request_defer():609] handle sender defer: 1
74
+ 2024-05-14 11:45:14,527 INFO SenderThread:84108 [sender.py:transition_state():613] send defer: 2
75
+ 2024-05-14 11:45:14,527 DEBUG HandlerThread:84108 [handler.py:handle_request():158] handle_request: defer
76
+ 2024-05-14 11:45:14,527 INFO HandlerThread:84108 [handler.py:handle_request_defer():184] handle defer: 2
77
+ 2024-05-14 11:45:14,527 INFO HandlerThread:84108 [system_monitor.py:finish():203] Stopping system monitor
78
+ 2024-05-14 11:45:14,527 DEBUG SystemMonitor:84108 [system_monitor.py:_start():172] Starting system metrics aggregation loop
79
+ 2024-05-14 11:45:14,527 INFO HandlerThread:84108 [interfaces.py:finish():200] Joined cpu monitor
80
+ 2024-05-14 11:45:14,527 DEBUG SystemMonitor:84108 [system_monitor.py:_start():179] Finished system metrics aggregation loop
81
+ 2024-05-14 11:45:14,527 INFO HandlerThread:84108 [interfaces.py:finish():200] Joined disk monitor
82
+ 2024-05-14 11:45:14,527 DEBUG SystemMonitor:84108 [system_monitor.py:_start():183] Publishing last batch of metrics
83
+ 2024-05-14 11:45:14,528 INFO HandlerThread:84108 [interfaces.py:finish():200] Joined memory monitor
84
+ 2024-05-14 11:45:14,529 INFO HandlerThread:84108 [interfaces.py:finish():200] Joined network monitor
85
+ 2024-05-14 11:45:14,529 DEBUG SenderThread:84108 [sender.py:send_request():405] send_request: defer
86
+ 2024-05-14 11:45:14,529 INFO SenderThread:84108 [sender.py:send_request_defer():609] handle sender defer: 2
87
+ 2024-05-14 11:45:14,529 INFO SenderThread:84108 [sender.py:transition_state():613] send defer: 3
88
+ 2024-05-14 11:45:14,530 DEBUG HandlerThread:84108 [handler.py:handle_request():158] handle_request: defer
89
+ 2024-05-14 11:45:14,530 INFO HandlerThread:84108 [handler.py:handle_request_defer():184] handle defer: 3
90
+ 2024-05-14 11:45:14,530 DEBUG SenderThread:84108 [sender.py:send():378] send: stats
91
+ 2024-05-14 11:45:14,530 DEBUG SenderThread:84108 [sender.py:send_request():405] send_request: defer
92
+ 2024-05-14 11:45:14,530 INFO SenderThread:84108 [sender.py:send_request_defer():609] handle sender defer: 3
93
+ 2024-05-14 11:45:14,530 INFO SenderThread:84108 [sender.py:transition_state():613] send defer: 4
94
+ 2024-05-14 11:45:14,530 DEBUG HandlerThread:84108 [handler.py:handle_request():158] handle_request: defer
95
+ 2024-05-14 11:45:14,531 INFO HandlerThread:84108 [handler.py:handle_request_defer():184] handle defer: 4
96
+ 2024-05-14 11:45:14,531 DEBUG SenderThread:84108 [sender.py:send_request():405] send_request: defer
97
+ 2024-05-14 11:45:14,531 INFO SenderThread:84108 [sender.py:send_request_defer():609] handle sender defer: 4
98
+ 2024-05-14 11:45:14,531 INFO SenderThread:84108 [sender.py:transition_state():613] send defer: 5
99
+ 2024-05-14 11:45:14,531 DEBUG HandlerThread:84108 [handler.py:handle_request():158] handle_request: defer
100
+ 2024-05-14 11:45:14,531 INFO HandlerThread:84108 [handler.py:handle_request_defer():184] handle defer: 5
101
+ 2024-05-14 11:45:14,531 DEBUG SenderThread:84108 [sender.py:send():378] send: summary
102
+ 2024-05-14 11:45:14,532 INFO SenderThread:84108 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end
103
+ 2024-05-14 11:45:14,532 DEBUG SenderThread:84108 [sender.py:send_request():405] send_request: defer
104
+ 2024-05-14 11:45:14,532 INFO SenderThread:84108 [sender.py:send_request_defer():609] handle sender defer: 5
105
+ 2024-05-14 11:45:14,532 INFO SenderThread:84108 [sender.py:transition_state():613] send defer: 6
106
+ 2024-05-14 11:45:14,532 DEBUG HandlerThread:84108 [handler.py:handle_request():158] handle_request: defer
107
+ 2024-05-14 11:45:14,532 INFO HandlerThread:84108 [handler.py:handle_request_defer():184] handle defer: 6
108
+ 2024-05-14 11:45:14,532 DEBUG SenderThread:84108 [sender.py:send_request():405] send_request: defer
109
+ 2024-05-14 11:45:14,532 INFO SenderThread:84108 [sender.py:send_request_defer():609] handle sender defer: 6
110
+ 2024-05-14 11:45:14,535 DEBUG HandlerThread:84108 [handler.py:handle_request():158] handle_request: status_report
111
+ 2024-05-14 11:45:14,621 INFO SenderThread:84108 [sender.py:transition_state():613] send defer: 7
112
+ 2024-05-14 11:45:14,621 DEBUG HandlerThread:84108 [handler.py:handle_request():158] handle_request: defer
113
+ 2024-05-14 11:45:14,621 INFO HandlerThread:84108 [handler.py:handle_request_defer():184] handle defer: 7
114
+ 2024-05-14 11:45:14,622 DEBUG SenderThread:84108 [sender.py:send_request():405] send_request: defer
115
+ 2024-05-14 11:45:14,622 INFO SenderThread:84108 [sender.py:send_request_defer():609] handle sender defer: 7
116
+ 2024-05-14 11:45:15,442 INFO Thread-12 :84108 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_114444-rasgu64a/files/config.yaml
117
+ 2024-05-14 11:45:15,442 INFO Thread-12 :84108 [dir_watcher.py:_on_file_created():271] file/dir created: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_114444-rasgu64a/files/wandb-summary.json
118
+ 2024-05-14 11:45:15,524 DEBUG HandlerThread:84108 [handler.py:handle_request():158] handle_request: poll_exit
119
+ 2024-05-14 11:45:15,836 INFO SenderThread:84108 [sender.py:transition_state():613] send defer: 8
120
+ 2024-05-14 11:45:15,836 DEBUG SenderThread:84108 [sender.py:send_request():405] send_request: poll_exit
121
+ 2024-05-14 11:45:15,836 DEBUG HandlerThread:84108 [handler.py:handle_request():158] handle_request: defer
122
+ 2024-05-14 11:45:15,836 INFO HandlerThread:84108 [handler.py:handle_request_defer():184] handle defer: 8
123
+ 2024-05-14 11:45:15,837 DEBUG SenderThread:84108 [sender.py:send_request():405] send_request: defer
124
+ 2024-05-14 11:45:15,837 INFO SenderThread:84108 [sender.py:send_request_defer():609] handle sender defer: 8
125
+ 2024-05-14 11:45:15,837 INFO SenderThread:84108 [job_builder.py:build():432] Attempting to build job artifact
126
+ 2024-05-14 11:45:15,837 INFO SenderThread:84108 [job_builder.py:_get_source_type():576] no source found
127
+ 2024-05-14 11:45:15,837 INFO SenderThread:84108 [sender.py:transition_state():613] send defer: 9
128
+ 2024-05-14 11:45:15,837 DEBUG HandlerThread:84108 [handler.py:handle_request():158] handle_request: defer
129
+ 2024-05-14 11:45:15,837 INFO HandlerThread:84108 [handler.py:handle_request_defer():184] handle defer: 9
130
+ 2024-05-14 11:45:15,837 DEBUG SenderThread:84108 [sender.py:send_request():405] send_request: defer
131
+ 2024-05-14 11:45:15,838 INFO SenderThread:84108 [sender.py:send_request_defer():609] handle sender defer: 9
132
+ 2024-05-14 11:45:15,838 INFO SenderThread:84108 [dir_watcher.py:finish():358] shutting down directory watcher
133
+ 2024-05-14 11:45:16,444 INFO SenderThread:84108 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_114444-rasgu64a/files/output.log
134
+ 2024-05-14 11:45:16,444 INFO SenderThread:84108 [dir_watcher.py:finish():388] scan: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_114444-rasgu64a/files
135
+ 2024-05-14 11:45:16,445 INFO SenderThread:84108 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_114444-rasgu64a/files/output.log output.log
136
+ 2024-05-14 11:45:16,445 INFO SenderThread:84108 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_114444-rasgu64a/files/requirements.txt requirements.txt
137
+ 2024-05-14 11:45:16,445 INFO SenderThread:84108 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_114444-rasgu64a/files/config.yaml config.yaml
138
+ 2024-05-14 11:45:16,445 INFO SenderThread:84108 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_114444-rasgu64a/files/wandb-summary.json wandb-summary.json
139
+ 2024-05-14 11:45:16,445 INFO SenderThread:84108 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_114444-rasgu64a/files/wandb-metadata.json wandb-metadata.json
140
+ 2024-05-14 11:45:16,445 INFO SenderThread:84108 [sender.py:transition_state():613] send defer: 10
141
+ 2024-05-14 11:45:16,447 DEBUG HandlerThread:84108 [handler.py:handle_request():158] handle_request: defer
142
+ 2024-05-14 11:45:16,447 INFO HandlerThread:84108 [handler.py:handle_request_defer():184] handle defer: 10
143
+ 2024-05-14 11:45:16,449 DEBUG SenderThread:84108 [sender.py:send_request():405] send_request: defer
144
+ 2024-05-14 11:45:16,449 INFO SenderThread:84108 [sender.py:send_request_defer():609] handle sender defer: 10
145
+ 2024-05-14 11:45:16,449 INFO SenderThread:84108 [file_pusher.py:finish():169] shutting down file pusher
146
+ 2024-05-14 11:45:16,525 DEBUG HandlerThread:84108 [handler.py:handle_request():158] handle_request: poll_exit
147
+ 2024-05-14 11:45:16,525 DEBUG SenderThread:84108 [sender.py:send_request():405] send_request: poll_exit
148
+ 2024-05-14 11:45:16,693 INFO wandb-upload_1:84108 [upload_job.py:push():130] Uploaded file /data/cronscript/lm-evaluation-harness/wandb/run-20240514_114444-rasgu64a/files/requirements.txt
149
+ 2024-05-14 11:45:16,877 INFO wandb-upload_0:84108 [upload_job.py:push():130] Uploaded file /data/cronscript/lm-evaluation-harness/wandb/run-20240514_114444-rasgu64a/files/output.log
150
+ 2024-05-14 11:45:16,957 INFO wandb-upload_3:84108 [upload_job.py:push():130] Uploaded file /data/cronscript/lm-evaluation-harness/wandb/run-20240514_114444-rasgu64a/files/wandb-summary.json
151
+ 2024-05-14 11:45:16,958 INFO wandb-upload_2:84108 [upload_job.py:push():130] Uploaded file /data/cronscript/lm-evaluation-harness/wandb/run-20240514_114444-rasgu64a/files/config.yaml
152
+ 2024-05-14 11:45:17,158 INFO Thread-11 (_thread_body):84108 [sender.py:transition_state():613] send defer: 11
153
+ 2024-05-14 11:45:17,158 DEBUG HandlerThread:84108 [handler.py:handle_request():158] handle_request: defer
154
+ 2024-05-14 11:45:17,158 INFO HandlerThread:84108 [handler.py:handle_request_defer():184] handle defer: 11
155
+ 2024-05-14 11:45:17,159 DEBUG SenderThread:84108 [sender.py:send_request():405] send_request: defer
156
+ 2024-05-14 11:45:17,159 INFO SenderThread:84108 [sender.py:send_request_defer():609] handle sender defer: 11
157
+ 2024-05-14 11:45:17,159 INFO SenderThread:84108 [file_pusher.py:join():175] waiting for file pusher
158
+ 2024-05-14 11:45:17,159 INFO SenderThread:84108 [sender.py:transition_state():613] send defer: 12
159
+ 2024-05-14 11:45:17,159 DEBUG HandlerThread:84108 [handler.py:handle_request():158] handle_request: defer
160
+ 2024-05-14 11:45:17,159 INFO HandlerThread:84108 [handler.py:handle_request_defer():184] handle defer: 12
161
+ 2024-05-14 11:45:17,159 DEBUG SenderThread:84108 [sender.py:send_request():405] send_request: defer
162
+ 2024-05-14 11:45:17,159 INFO SenderThread:84108 [sender.py:send_request_defer():609] handle sender defer: 12
163
+ 2024-05-14 11:45:17,159 INFO SenderThread:84108 [file_stream.py:finish():601] file stream finish called
164
+ 2024-05-14 11:45:17,235 INFO SenderThread:84108 [file_stream.py:finish():605] file stream finish is done
165
+ 2024-05-14 11:45:17,235 INFO SenderThread:84108 [sender.py:transition_state():613] send defer: 13
166
+ 2024-05-14 11:45:17,235 DEBUG HandlerThread:84108 [handler.py:handle_request():158] handle_request: defer
167
+ 2024-05-14 11:45:17,235 INFO HandlerThread:84108 [handler.py:handle_request_defer():184] handle defer: 13
168
+ 2024-05-14 11:45:17,236 DEBUG SenderThread:84108 [sender.py:send_request():405] send_request: defer
169
+ 2024-05-14 11:45:17,236 INFO SenderThread:84108 [sender.py:send_request_defer():609] handle sender defer: 13
170
+ 2024-05-14 11:45:17,236 INFO SenderThread:84108 [sender.py:transition_state():613] send defer: 14
171
+ 2024-05-14 11:45:17,236 DEBUG HandlerThread:84108 [handler.py:handle_request():158] handle_request: defer
172
+ 2024-05-14 11:45:17,236 INFO HandlerThread:84108 [handler.py:handle_request_defer():184] handle defer: 14
173
+ 2024-05-14 11:45:17,236 DEBUG SenderThread:84108 [sender.py:send():378] send: final
174
+ 2024-05-14 11:45:17,236 DEBUG SenderThread:84108 [sender.py:send():378] send: footer
175
+ 2024-05-14 11:45:17,236 DEBUG SenderThread:84108 [sender.py:send_request():405] send_request: defer
176
+ 2024-05-14 11:45:17,236 INFO SenderThread:84108 [sender.py:send_request_defer():609] handle sender defer: 14
177
+ 2024-05-14 11:45:17,237 DEBUG HandlerThread:84108 [handler.py:handle_request():158] handle_request: poll_exit
178
+ 2024-05-14 11:45:17,237 DEBUG SenderThread:84108 [sender.py:send_request():405] send_request: poll_exit
179
+ 2024-05-14 11:45:17,237 DEBUG HandlerThread:84108 [handler.py:handle_request():158] handle_request: poll_exit
180
+ 2024-05-14 11:45:17,238 DEBUG SenderThread:84108 [sender.py:send_request():405] send_request: poll_exit
181
+ 2024-05-14 11:45:17,238 DEBUG HandlerThread:84108 [handler.py:handle_request():158] handle_request: server_info
182
+ 2024-05-14 11:45:17,238 DEBUG SenderThread:84108 [sender.py:send_request():405] send_request: server_info
183
+ 2024-05-14 11:45:17,239 DEBUG HandlerThread:84108 [handler.py:handle_request():158] handle_request: get_summary
184
+ 2024-05-14 11:45:17,239 DEBUG HandlerThread:84108 [handler.py:handle_request():158] handle_request: sampled_history
185
+ 2024-05-14 11:45:17,240 DEBUG HandlerThread:84108 [handler.py:handle_request():158] handle_request: internal_messages
186
+ 2024-05-14 11:45:17,296 INFO MainThread:84108 [wandb_run.py:_footer_history_summary_info():3994] rendering history
187
+ 2024-05-14 11:45:17,296 INFO MainThread:84108 [wandb_run.py:_footer_history_summary_info():4026] rendering summary
188
+ 2024-05-14 11:45:17,296 INFO MainThread:84108 [wandb_run.py:_footer_sync_info():3953] logging synced files
189
+ 2024-05-14 11:45:17,296 DEBUG HandlerThread:84108 [handler.py:handle_request():158] handle_request: shutdown
190
+ 2024-05-14 11:45:17,296 INFO HandlerThread:84108 [handler.py:finish():882] shutting down handler
191
+ 2024-05-14 11:45:18,238 INFO WriterThread:84108 [datastore.py:close():296] close: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_114444-rasgu64a/run-rasgu64a.wandb
192
+ 2024-05-14 11:45:18,295 INFO SenderThread:84108 [sender.py:finish():1545] shutting down sender
193
+ 2024-05-14 11:45:18,295 INFO SenderThread:84108 [file_pusher.py:finish():169] shutting down file pusher
194
+ 2024-05-14 11:45:18,295 INFO SenderThread:84108 [file_pusher.py:join():175] waiting for file pusher
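Editorial note (not part of the uploaded files): the defer states 8 through 14 above are wandb's normal run-teardown path: the job builder runs, the directory watcher stops, the queued files (output.log, requirements.txt, config.yaml, wandb-summary.json) are pushed, and the file stream closes. As a point of reference, a minimal sketch of a run whose shutdown produces this kind of trace is shown below; the project name and metric are placeholders, not taken from this repository.

# Minimal sketch: any wandb run emits the defer/file-pusher sequence above
# in debug-internal.log when it shuts down. Project/metric names are hypothetical.
import wandb

run = wandb.init(project="example-project")  # placeholder project name
run.log({"accuracy": 0.5})                   # logged values end up in wandb-summary.json
run.finish()                                 # triggers send defer 0..14, file scan/upload, footer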
lm-evaluation-harness/wandb/run-20240514_114444-rasgu64a/logs/debug.log ADDED
@@ -0,0 +1,29 @@
1
+ 2024-05-14 11:44:44,066 INFO MainThread:82827 [wandb_setup.py:_flush():76] Current SDK version is 0.17.0
2
+ 2024-05-14 11:44:44,066 INFO MainThread:82827 [wandb_setup.py:_flush():76] Configure stats pid to 82827
3
+ 2024-05-14 11:44:44,066 INFO MainThread:82827 [wandb_setup.py:_flush():76] Loading settings from /root/.config/wandb/settings
4
+ 2024-05-14 11:44:44,066 INFO MainThread:82827 [wandb_setup.py:_flush():76] Loading settings from /data/cronscript/lm-evaluation-harness/wandb/settings
5
+ 2024-05-14 11:44:44,066 INFO MainThread:82827 [wandb_setup.py:_flush():76] Loading settings from environment variables: {}
6
+ 2024-05-14 11:44:44,066 INFO MainThread:82827 [wandb_setup.py:_flush():76] Applying setup settings: {'_disable_service': False}
7
+ 2024-05-14 11:44:44,066 WARNING MainThread:82827 [wandb_setup.py:_flush():76] Could not find program at -m lm_eval.__main__
8
+ 2024-05-14 11:44:44,066 INFO MainThread:82827 [wandb_setup.py:_flush():76] Inferring run settings from compute environment: {'program_relpath': None, 'program': '-m lm_eval.__main__'}
9
+ 2024-05-14 11:44:44,066 INFO MainThread:82827 [wandb_setup.py:_flush():76] Applying login settings: {}
10
+ 2024-05-14 11:44:44,066 INFO MainThread:82827 [wandb_init.py:_log_setup():520] Logging user logs to /data/cronscript/lm-evaluation-harness/wandb/run-20240514_114444-rasgu64a/logs/debug.log
11
+ 2024-05-14 11:44:44,066 INFO MainThread:82827 [wandb_init.py:_log_setup():521] Logging internal logs to /data/cronscript/lm-evaluation-harness/wandb/run-20240514_114444-rasgu64a/logs/debug-internal.log
12
+ 2024-05-14 11:44:44,066 INFO MainThread:82827 [wandb_init.py:init():560] calling init triggers
13
+ 2024-05-14 11:44:44,066 INFO MainThread:82827 [wandb_init.py:init():567] wandb.init called with sweep_config: {}
14
+ config: {}
15
+ 2024-05-14 11:44:44,066 INFO MainThread:82827 [wandb_init.py:init():610] starting backend
16
+ 2024-05-14 11:44:44,066 INFO MainThread:82827 [wandb_init.py:init():614] setting up manager
17
+ 2024-05-14 11:44:44,068 INFO MainThread:82827 [backend.py:_multiprocessing_setup():105] multiprocessing start_methods=fork,spawn,forkserver, using: spawn
18
+ 2024-05-14 11:44:44,068 INFO MainThread:82827 [wandb_init.py:init():622] backend started and connected
19
+ 2024-05-14 11:44:44,071 INFO MainThread:82827 [wandb_init.py:init():711] updated telemetry
20
+ 2024-05-14 11:44:44,085 INFO MainThread:82827 [wandb_init.py:init():744] communicating run to backend with 90.0 second timeout
21
+ 2024-05-14 11:44:44,361 INFO MainThread:82827 [wandb_run.py:_on_init():2396] communicating current version
22
+ 2024-05-14 11:44:44,439 INFO MainThread:82827 [wandb_run.py:_on_init():2405] got version response
23
+ 2024-05-14 11:44:44,439 INFO MainThread:82827 [wandb_init.py:init():795] starting run threads in backend
24
+ 2024-05-14 11:44:44,630 INFO MainThread:82827 [wandb_run.py:_console_start():2374] atexit reg
25
+ 2024-05-14 11:44:44,631 INFO MainThread:82827 [wandb_run.py:_redirect():2229] redirect: wrap_raw
26
+ 2024-05-14 11:44:44,631 INFO MainThread:82827 [wandb_run.py:_redirect():2294] Wrapping output streams.
27
+ 2024-05-14 11:44:44,631 INFO MainThread:82827 [wandb_run.py:_redirect():2319] Redirects installed.
28
+ 2024-05-14 11:44:44,633 INFO MainThread:82827 [wandb_init.py:init():838] run started, returning control to user process
29
+ 2024-05-14 11:45:18,297 WARNING MsgRouterThr:82827 [router.py:message_loop():77] message_loop has been closed
lm-evaluation-harness/wandb/run-20240514_114444-rasgu64a/run-rasgu64a.wandb ADDED
Binary file (11.5 kB). View file
 
lm-evaluation-harness/wandb/run-20240523_061244-lrp73hbe/files/output.log ADDED
@@ -0,0 +1,34 @@
1
+
2
+ 2024-05-23:06:12:44,732 INFO [__main__.py:251] Verbosity set to INFO
3
+ 2024-05-23:06:12:54,111 INFO [__main__.py:335] Selected Tasks: ['arc_easy', 'hellaswag', 'mrpc', 'openbookqa', 'sst2', 'winogrande']
4
+ 2024-05-23:06:12:54,112 INFO [evaluator.py:131] Setting random seed to 0 | Setting numpy seed to 1234 | Setting torch manual seed to 1234
5
+ 2024-05-23:06:12:54,112 INFO [evaluator.py:177] Initializing hf model, with arguments: {'pretrained': '/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step10000'}
6
+ 2024-05-23:06:12:56,415 INFO [huggingface.py:164] Using device 'cuda'
7
+ Traceback (most recent call last):
8
+ File "/usr/lib/python3.10/runpy.py", line 196, in _run_module_as_main
9
+ return _run_code(code, main_globals, None,
10
+ File "/usr/lib/python3.10/runpy.py", line 86, in _run_code
11
+ exec(code, run_globals)
12
+ File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 417, in <module>
13
+ cli_evaluate()
14
+ File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 341, in cli_evaluate
15
+ results = evaluator.simple_evaluate(
16
+ File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/utils.py", line 288, in _wrapper
17
+ return fn(*args, **kwargs)
18
+ File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/evaluator.py", line 180, in simple_evaluate
19
+ lm = lm_eval.api.registry.get_model(model).create_from_arg_string(
20
+ File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/api/model.py", line 134, in create_from_arg_string
21
+ return cls(**args, **args2)
22
+ File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 190, in __init__
23
+ self._get_config(
24
+ File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 471, in _get_config
25
+ self._config = transformers.AutoConfig.from_pretrained(
26
+ File "/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py", line 934, in from_pretrained
27
+ config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs)
28
+ File "/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 632, in get_config_dict
29
+ config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs)
30
+ File "/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 689, in _get_config_dict
31
+ resolved_config_file = cached_file(
32
+ File "/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py", line 370, in cached_file
33
+ raise EnvironmentError(
34
+ OSError: /mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step10000 does not appear to have a file named config.json. Checkout 'https://huggingface.co//mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step10000/tree/main' for available files.
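Editorial note (not part of the uploaded files): the traceback above fails inside transformers.AutoConfig.from_pretrained because the checkpoint directory under global_step10000 contains no config.json, so cached_file falls back to treating the path like a Hub repo id. A minimal pre-flight check is sketched below; the path is a placeholder for illustration, not a directory from this repository.

import os
from transformers import AutoConfig

# Hypothetical local checkpoint directory; substitute the real one.
ckpt_dir = "/path/to/hf_ckpt/global_step10000"

if not os.path.isfile(os.path.join(ckpt_dir, "config.json")):
    raise FileNotFoundError(
        f"{ckpt_dir} has no config.json; convert/export the checkpoint to HF format first."
    )

# Mirrors what the harness's HF model wrapper does once the file is present.
config = AutoConfig.from_pretrained(ckpt_dir)
print(config.model_type)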
lm-evaluation-harness/wandb/run-20240523_061244-lrp73hbe/files/wandb-summary.json ADDED
@@ -0,0 +1 @@
1
+ {"_wandb": {"runtime": 11}}
lm-evaluation-harness/wandb/run-20240523_061244-lrp73hbe/run-lrp73hbe.wandb ADDED
Binary file (11.2 kB). View file
 
lm-evaluation-harness/wandb/run-20240523_130407-wvnshpcy/files/config.yaml ADDED
@@ -0,0 +1,43 @@
1
+ wandb_version: 1
2
+
3
+ _wandb:
4
+ desc: null
5
+ value:
6
+ python_version: 3.10.12
7
+ cli_version: 0.17.0
8
+ framework: huggingface
9
+ huggingface_version: 4.41.1
10
+ is_jupyter_run: false
11
+ is_kaggle_kernel: false
12
+ start_time: 1716469447
13
+ t:
14
+ 1:
15
+ - 1
16
+ - 5
17
+ - 11
18
+ - 49
19
+ - 51
20
+ - 53
21
+ - 55
22
+ - 71
23
+ - 98
24
+ - 100
25
+ 2:
26
+ - 1
27
+ - 5
28
+ - 11
29
+ - 49
30
+ - 51
31
+ - 53
32
+ - 55
33
+ - 71
34
+ - 98
35
+ - 100
36
+ 3:
37
+ - 23
38
+ 4: 3.10.12
39
+ 5: 0.17.0
40
+ 6: 4.41.1
41
+ 8:
42
+ - 5
43
+ 13: linux-x86_64
lm-evaluation-harness/wandb/run-20240523_130407-wvnshpcy/files/output.log ADDED
@@ -0,0 +1,34 @@
1
+
2
+ 2024-05-23:13:04:08,203 INFO [__main__.py:251] Verbosity set to INFO
3
+ 2024-05-23:13:04:16,734 INFO [__main__.py:335] Selected Tasks: ['arc_easy', 'hellaswag', 'mrpc', 'openbookqa', 'sst2', 'winogrande']
4
+ 2024-05-23:13:04:16,735 INFO [evaluator.py:131] Setting random seed to 0 | Setting numpy seed to 1234 | Setting torch manual seed to 1234
5
+ 2024-05-23:13:04:16,736 INFO [evaluator.py:177] Initializing hf model, with arguments: {'pretrained': '/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step12000'}
6
+ 2024-05-23:13:04:19,146 INFO [huggingface.py:164] Using device 'cuda'
7
+ Traceback (most recent call last):
8
+ File "/usr/lib/python3.10/runpy.py", line 196, in _run_module_as_main
9
+ return _run_code(code, main_globals, None,
10
+ File "/usr/lib/python3.10/runpy.py", line 86, in _run_code
11
+ exec(code, run_globals)
12
+ File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 417, in <module>
13
+ cli_evaluate()
14
+ File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 341, in cli_evaluate
15
+ results = evaluator.simple_evaluate(
16
+ File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/utils.py", line 288, in _wrapper
17
+ return fn(*args, **kwargs)
18
+ File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/evaluator.py", line 180, in simple_evaluate
19
+ lm = lm_eval.api.registry.get_model(model).create_from_arg_string(
20
+ File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/api/model.py", line 134, in create_from_arg_string
21
+ return cls(**args, **args2)
22
+ File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 190, in __init__
23
+ self._get_config(
24
+ File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 471, in _get_config
25
+ self._config = transformers.AutoConfig.from_pretrained(
26
+ File "/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py", line 934, in from_pretrained
27
+ config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs)
28
+ File "/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 632, in get_config_dict
29
+ config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs)
30
+ File "/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 689, in _get_config_dict
31
+ resolved_config_file = cached_file(
32
+ File "/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py", line 370, in cached_file
33
+ raise EnvironmentError(
34
+ OSError: /mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step12000 does not appear to have a file named config.json. Checkout 'https://huggingface.co//mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step12000/tree/main' for available files.
lm-evaluation-harness/wandb/run-20240523_130407-wvnshpcy/files/wandb-metadata.json ADDED
@@ -0,0 +1,850 @@
1
+ {
2
+ "os": "Linux-5.15.0-92-generic-x86_64-with-glibc2.35",
3
+ "python": "3.10.12",
4
+ "heartbeatAt": "2024-05-23T13:04:07.993231",
5
+ "startedAt": "2024-05-23T13:04:07.485994",
6
+ "docker": null,
7
+ "cuda": null,
8
+ "args": [
9
+ "--model",
10
+ "hf",
11
+ "--model_args",
12
+ "pretrained=/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step12000",
13
+ "--tasks",
14
+ "hellaswag,arc_easy,openbookqa,winogrande,sst2,mrpc",
15
+ "--batch_size",
16
+ "auto",
17
+ "--wandb_args",
18
+ "project=bharatgpt,group=trial_expt_2"
19
+ ],
20
+ "state": "running",
21
+ "program": "-m lm_eval.__main__",
22
+ "codePathLocal": null,
23
+ "git": {
24
+ "remote": "https://github.com/EleutherAI/lm-evaluation-harness",
25
+ "commit": null
26
+ },
27
+ "email": null,
28
+ "root": "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness",
29
+ "host": "peacock-evaluation-worker-0",
30
+ "username": "root",
31
+ "executable": "/usr/bin/python3",
32
+ "cpu_count": 80,
33
+ "cpu_count_logical": 160,
34
+ "cpu_freq": {
35
+ "current": 2327.17863125,
36
+ "min": 800.0,
37
+ "max": 3400.0
38
+ },
39
+ "cpu_freq_per_core": [
40
+ {
41
+ "current": 3399.997,
42
+ "min": 800.0,
43
+ "max": 3400.0
44
+ },
45
+ {
46
+ "current": 3400.0,
47
+ "min": 800.0,
48
+ "max": 3400.0
49
+ },
50
+ {
51
+ "current": 2300.0,
52
+ "min": 800.0,
53
+ "max": 3400.0
54
+ },
55
+ {
56
+ "current": 2300.0,
57
+ "min": 800.0,
58
+ "max": 3400.0
59
+ },
60
+ {
61
+ "current": 2300.0,
62
+ "min": 800.0,
63
+ "max": 3400.0
64
+ },
65
+ {
66
+ "current": 2300.0,
67
+ "min": 800.0,
68
+ "max": 3400.0
69
+ },
70
+ {
71
+ "current": 2300.0,
72
+ "min": 800.0,
73
+ "max": 3400.0
74
+ },
75
+ {
76
+ "current": 2300.0,
77
+ "min": 800.0,
78
+ "max": 3400.0
79
+ },
80
+ {
81
+ "current": 2300.0,
82
+ "min": 800.0,
83
+ "max": 3400.0
84
+ },
85
+ {
86
+ "current": 2300.0,
87
+ "min": 800.0,
88
+ "max": 3400.0
89
+ },
90
+ {
91
+ "current": 2300.0,
92
+ "min": 800.0,
93
+ "max": 3400.0
94
+ },
95
+ {
96
+ "current": 2300.0,
97
+ "min": 800.0,
98
+ "max": 3400.0
99
+ },
100
+ {
101
+ "current": 2300.0,
102
+ "min": 800.0,
103
+ "max": 3400.0
104
+ },
105
+ {
106
+ "current": 2300.0,
107
+ "min": 800.0,
108
+ "max": 3400.0
109
+ },
110
+ {
111
+ "current": 2300.0,
112
+ "min": 800.0,
113
+ "max": 3400.0
114
+ },
115
+ {
116
+ "current": 2300.0,
117
+ "min": 800.0,
118
+ "max": 3400.0
119
+ },
120
+ {
121
+ "current": 2300.0,
122
+ "min": 800.0,
123
+ "max": 3400.0
124
+ },
125
+ {
126
+ "current": 2300.0,
127
+ "min": 800.0,
128
+ "max": 3400.0
129
+ },
130
+ {
131
+ "current": 2300.0,
132
+ "min": 800.0,
133
+ "max": 3400.0
134
+ },
135
+ {
136
+ "current": 2300.0,
137
+ "min": 800.0,
138
+ "max": 3400.0
139
+ },
140
+ {
141
+ "current": 2300.0,
142
+ "min": 800.0,
143
+ "max": 3400.0
144
+ },
145
+ {
146
+ "current": 2300.0,
147
+ "min": 800.0,
148
+ "max": 3400.0
149
+ },
150
+ {
151
+ "current": 2300.0,
152
+ "min": 800.0,
153
+ "max": 3400.0
154
+ },
155
+ {
156
+ "current": 2300.0,
157
+ "min": 800.0,
158
+ "max": 3400.0
159
+ },
160
+ {
161
+ "current": 2300.0,
162
+ "min": 800.0,
163
+ "max": 3400.0
164
+ },
165
+ {
166
+ "current": 2300.0,
167
+ "min": 800.0,
168
+ "max": 3400.0
169
+ },
170
+ {
171
+ "current": 2300.0,
172
+ "min": 800.0,
173
+ "max": 3400.0
174
+ },
175
+ {
176
+ "current": 2300.0,
177
+ "min": 800.0,
178
+ "max": 3400.0
179
+ },
180
+ {
181
+ "current": 2300.0,
182
+ "min": 800.0,
183
+ "max": 3400.0
184
+ },
185
+ {
186
+ "current": 2300.0,
187
+ "min": 800.0,
188
+ "max": 3400.0
189
+ },
190
+ {
191
+ "current": 2300.0,
192
+ "min": 800.0,
193
+ "max": 3400.0
194
+ },
195
+ {
196
+ "current": 2300.0,
197
+ "min": 800.0,
198
+ "max": 3400.0
199
+ },
200
+ {
201
+ "current": 2300.0,
202
+ "min": 800.0,
203
+ "max": 3400.0
204
+ },
205
+ {
206
+ "current": 2300.0,
207
+ "min": 800.0,
208
+ "max": 3400.0
209
+ },
210
+ {
211
+ "current": 2300.0,
212
+ "min": 800.0,
213
+ "max": 3400.0
214
+ },
215
+ {
216
+ "current": 2300.0,
217
+ "min": 800.0,
218
+ "max": 3400.0
219
+ },
220
+ {
221
+ "current": 2300.0,
222
+ "min": 800.0,
223
+ "max": 3400.0
224
+ },
225
+ {
226
+ "current": 2300.0,
227
+ "min": 800.0,
228
+ "max": 3400.0
229
+ },
230
+ {
231
+ "current": 2300.0,
232
+ "min": 800.0,
233
+ "max": 3400.0
234
+ },
235
+ {
236
+ "current": 2300.0,
237
+ "min": 800.0,
238
+ "max": 3400.0
239
+ },
240
+ {
241
+ "current": 3400.0,
242
+ "min": 800.0,
243
+ "max": 3400.0
244
+ },
245
+ {
246
+ "current": 2300.0,
247
+ "min": 800.0,
248
+ "max": 3400.0
249
+ },
250
+ {
251
+ "current": 2300.0,
252
+ "min": 800.0,
253
+ "max": 3400.0
254
+ },
255
+ {
256
+ "current": 2300.0,
257
+ "min": 800.0,
258
+ "max": 3400.0
259
+ },
260
+ {
261
+ "current": 2300.0,
262
+ "min": 800.0,
263
+ "max": 3400.0
264
+ },
265
+ {
266
+ "current": 2300.0,
267
+ "min": 800.0,
268
+ "max": 3400.0
269
+ },
270
+ {
271
+ "current": 2300.0,
272
+ "min": 800.0,
273
+ "max": 3400.0
274
+ },
275
+ {
276
+ "current": 2300.0,
277
+ "min": 800.0,
278
+ "max": 3400.0
279
+ },
280
+ {
281
+ "current": 2300.0,
282
+ "min": 800.0,
283
+ "max": 3400.0
284
+ },
285
+ {
286
+ "current": 2300.0,
287
+ "min": 800.0,
288
+ "max": 3400.0
289
+ },
290
+ {
291
+ "current": 2300.0,
292
+ "min": 800.0,
293
+ "max": 3400.0
294
+ },
295
+ {
296
+ "current": 2300.0,
297
+ "min": 800.0,
298
+ "max": 3400.0
299
+ },
300
+ {
301
+ "current": 2300.0,
302
+ "min": 800.0,
303
+ "max": 3400.0
304
+ },
305
+ {
306
+ "current": 2300.0,
307
+ "min": 800.0,
308
+ "max": 3400.0
309
+ },
310
+ {
311
+ "current": 2300.0,
312
+ "min": 800.0,
313
+ "max": 3400.0
314
+ },
315
+ {
316
+ "current": 2300.0,
317
+ "min": 800.0,
318
+ "max": 3400.0
319
+ },
320
+ {
321
+ "current": 2300.0,
322
+ "min": 800.0,
323
+ "max": 3400.0
324
+ },
325
+ {
326
+ "current": 2300.0,
327
+ "min": 800.0,
328
+ "max": 3400.0
329
+ },
330
+ {
331
+ "current": 2300.0,
332
+ "min": 800.0,
333
+ "max": 3400.0
334
+ },
335
+ {
336
+ "current": 2300.0,
337
+ "min": 800.0,
338
+ "max": 3400.0
339
+ },
340
+ {
341
+ "current": 2300.0,
342
+ "min": 800.0,
343
+ "max": 3400.0
344
+ },
345
+ {
346
+ "current": 2300.0,
347
+ "min": 800.0,
348
+ "max": 3400.0
349
+ },
350
+ {
351
+ "current": 2300.0,
352
+ "min": 800.0,
353
+ "max": 3400.0
354
+ },
355
+ {
356
+ "current": 2300.0,
357
+ "min": 800.0,
358
+ "max": 3400.0
359
+ },
360
+ {
361
+ "current": 2300.0,
362
+ "min": 800.0,
363
+ "max": 3400.0
364
+ },
365
+ {
366
+ "current": 2300.0,
367
+ "min": 800.0,
368
+ "max": 3400.0
369
+ },
370
+ {
371
+ "current": 2300.0,
372
+ "min": 800.0,
373
+ "max": 3400.0
374
+ },
375
+ {
376
+ "current": 2300.0,
377
+ "min": 800.0,
378
+ "max": 3400.0
379
+ },
380
+ {
381
+ "current": 2300.0,
382
+ "min": 800.0,
383
+ "max": 3400.0
384
+ },
385
+ {
386
+ "current": 2300.0,
387
+ "min": 800.0,
388
+ "max": 3400.0
389
+ },
390
+ {
391
+ "current": 2300.0,
392
+ "min": 800.0,
393
+ "max": 3400.0
394
+ },
395
+ {
396
+ "current": 2300.0,
397
+ "min": 800.0,
398
+ "max": 3400.0
399
+ },
400
+ {
401
+ "current": 2300.0,
402
+ "min": 800.0,
403
+ "max": 3400.0
404
+ },
405
+ {
406
+ "current": 2300.0,
407
+ "min": 800.0,
408
+ "max": 3400.0
409
+ },
410
+ {
411
+ "current": 2300.0,
412
+ "min": 800.0,
413
+ "max": 3400.0
414
+ },
415
+ {
416
+ "current": 2300.0,
417
+ "min": 800.0,
418
+ "max": 3400.0
419
+ },
420
+ {
421
+ "current": 2300.0,
422
+ "min": 800.0,
423
+ "max": 3400.0
424
+ },
425
+ {
426
+ "current": 2300.0,
427
+ "min": 800.0,
428
+ "max": 3400.0
429
+ },
430
+ {
431
+ "current": 2300.0,
432
+ "min": 800.0,
433
+ "max": 3400.0
434
+ },
435
+ {
436
+ "current": 2300.0,
437
+ "min": 800.0,
438
+ "max": 3400.0
439
+ },
440
+ {
441
+ "current": 3400.0,
442
+ "min": 800.0,
443
+ "max": 3400.0
444
+ },
445
+ {
446
+ "current": 2300.0,
447
+ "min": 800.0,
448
+ "max": 3400.0
449
+ },
450
+ {
451
+ "current": 2300.0,
452
+ "min": 800.0,
453
+ "max": 3400.0
454
+ },
455
+ {
456
+ "current": 2300.0,
457
+ "min": 800.0,
458
+ "max": 3400.0
459
+ },
460
+ {
461
+ "current": 2300.0,
462
+ "min": 800.0,
463
+ "max": 3400.0
464
+ },
465
+ {
466
+ "current": 2300.0,
467
+ "min": 800.0,
468
+ "max": 3400.0
469
+ },
470
+ {
471
+ "current": 2300.0,
472
+ "min": 800.0,
473
+ "max": 3400.0
474
+ },
475
+ {
476
+ "current": 2300.0,
477
+ "min": 800.0,
478
+ "max": 3400.0
479
+ },
480
+ {
481
+ "current": 2300.0,
482
+ "min": 800.0,
483
+ "max": 3400.0
484
+ },
485
+ {
486
+ "current": 2300.0,
487
+ "min": 800.0,
488
+ "max": 3400.0
489
+ },
490
+ {
491
+ "current": 2300.0,
492
+ "min": 800.0,
493
+ "max": 3400.0
494
+ },
495
+ {
496
+ "current": 2300.0,
497
+ "min": 800.0,
498
+ "max": 3400.0
499
+ },
500
+ {
501
+ "current": 2300.0,
502
+ "min": 800.0,
503
+ "max": 3400.0
504
+ },
505
+ {
506
+ "current": 2300.0,
507
+ "min": 800.0,
508
+ "max": 3400.0
509
+ },
510
+ {
511
+ "current": 2300.0,
512
+ "min": 800.0,
513
+ "max": 3400.0
514
+ },
515
+ {
516
+ "current": 2300.0,
517
+ "min": 800.0,
518
+ "max": 3400.0
519
+ },
520
+ {
521
+ "current": 2300.0,
522
+ "min": 800.0,
523
+ "max": 3400.0
524
+ },
525
+ {
526
+ "current": 2300.0,
527
+ "min": 800.0,
528
+ "max": 3400.0
529
+ },
530
+ {
531
+ "current": 2300.0,
532
+ "min": 800.0,
533
+ "max": 3400.0
534
+ },
535
+ {
536
+ "current": 2300.0,
537
+ "min": 800.0,
538
+ "max": 3400.0
539
+ },
540
+ {
541
+ "current": 2300.0,
542
+ "min": 800.0,
543
+ "max": 3400.0
544
+ },
545
+ {
546
+ "current": 2300.0,
547
+ "min": 800.0,
548
+ "max": 3400.0
549
+ },
550
+ {
551
+ "current": 2300.0,
552
+ "min": 800.0,
553
+ "max": 3400.0
554
+ },
555
+ {
556
+ "current": 2300.0,
557
+ "min": 800.0,
558
+ "max": 3400.0
559
+ },
560
+ {
561
+ "current": 2300.0,
562
+ "min": 800.0,
563
+ "max": 3400.0
564
+ },
565
+ {
566
+ "current": 2300.0,
567
+ "min": 800.0,
568
+ "max": 3400.0
569
+ },
570
+ {
571
+ "current": 2300.0,
572
+ "min": 800.0,
573
+ "max": 3400.0
574
+ },
575
+ {
576
+ "current": 2300.0,
577
+ "min": 800.0,
578
+ "max": 3400.0
579
+ },
580
+ {
581
+ "current": 2300.0,
582
+ "min": 800.0,
583
+ "max": 3400.0
584
+ },
585
+ {
586
+ "current": 2300.0,
587
+ "min": 800.0,
588
+ "max": 3400.0
589
+ },
590
+ {
591
+ "current": 2300.0,
592
+ "min": 800.0,
593
+ "max": 3400.0
594
+ },
595
+ {
596
+ "current": 2300.0,
597
+ "min": 800.0,
598
+ "max": 3400.0
599
+ },
600
+ {
601
+ "current": 2300.0,
602
+ "min": 800.0,
603
+ "max": 3400.0
604
+ },
605
+ {
606
+ "current": 2300.0,
607
+ "min": 800.0,
608
+ "max": 3400.0
609
+ },
610
+ {
611
+ "current": 2300.0,
612
+ "min": 800.0,
613
+ "max": 3400.0
614
+ },
615
+ {
616
+ "current": 2300.0,
617
+ "min": 800.0,
618
+ "max": 3400.0
619
+ },
620
+ {
621
+ "current": 2300.0,
622
+ "min": 800.0,
623
+ "max": 3400.0
624
+ },
625
+ {
626
+ "current": 2300.0,
627
+ "min": 800.0,
628
+ "max": 3400.0
629
+ },
630
+ {
631
+ "current": 2300.0,
632
+ "min": 800.0,
633
+ "max": 3400.0
634
+ },
635
+ {
636
+ "current": 2300.0,
637
+ "min": 800.0,
638
+ "max": 3400.0
639
+ },
640
+ {
641
+ "current": 2300.0,
642
+ "min": 800.0,
643
+ "max": 3400.0
644
+ },
645
+ {
646
+ "current": 2300.0,
647
+ "min": 800.0,
648
+ "max": 3400.0
649
+ },
650
+ {
651
+ "current": 2300.0,
652
+ "min": 800.0,
653
+ "max": 3400.0
654
+ },
655
+ {
656
+ "current": 2300.0,
657
+ "min": 800.0,
658
+ "max": 3400.0
659
+ },
660
+ {
661
+ "current": 2300.0,
662
+ "min": 800.0,
663
+ "max": 3400.0
664
+ },
665
+ {
666
+ "current": 2300.0,
667
+ "min": 800.0,
668
+ "max": 3400.0
669
+ },
670
+ {
671
+ "current": 2300.0,
672
+ "min": 800.0,
673
+ "max": 3400.0
674
+ },
675
+ {
676
+ "current": 2300.0,
677
+ "min": 800.0,
678
+ "max": 3400.0
679
+ },
680
+ {
681
+ "current": 2300.0,
682
+ "min": 800.0,
683
+ "max": 3400.0
684
+ },
685
+ {
686
+ "current": 2300.0,
687
+ "min": 800.0,
688
+ "max": 3400.0
689
+ },
690
+ {
691
+ "current": 2300.0,
692
+ "min": 800.0,
693
+ "max": 3400.0
694
+ },
695
+ {
696
+ "current": 2300.0,
697
+ "min": 800.0,
698
+ "max": 3400.0
699
+ },
700
+ {
701
+ "current": 2300.0,
702
+ "min": 800.0,
703
+ "max": 3400.0
704
+ },
705
+ {
706
+ "current": 2300.0,
707
+ "min": 800.0,
708
+ "max": 3400.0
709
+ },
710
+ {
711
+ "current": 2300.0,
712
+ "min": 800.0,
713
+ "max": 3400.0
714
+ },
715
+ {
716
+ "current": 2300.0,
717
+ "min": 800.0,
718
+ "max": 3400.0
719
+ },
720
+ {
721
+ "current": 2300.0,
722
+ "min": 800.0,
723
+ "max": 3400.0
724
+ },
725
+ {
726
+ "current": 2300.0,
727
+ "min": 800.0,
728
+ "max": 3400.0
729
+ },
730
+ {
731
+ "current": 2300.0,
732
+ "min": 800.0,
733
+ "max": 3400.0
734
+ },
735
+ {
736
+ "current": 2300.0,
737
+ "min": 800.0,
738
+ "max": 3400.0
739
+ },
740
+ {
741
+ "current": 2300.0,
742
+ "min": 800.0,
743
+ "max": 3400.0
744
+ },
745
+ {
746
+ "current": 2300.0,
747
+ "min": 800.0,
748
+ "max": 3400.0
749
+ },
750
+ {
751
+ "current": 2300.0,
752
+ "min": 800.0,
753
+ "max": 3400.0
754
+ },
755
+ {
756
+ "current": 2300.0,
757
+ "min": 800.0,
758
+ "max": 3400.0
759
+ },
760
+ {
761
+ "current": 2300.0,
762
+ "min": 800.0,
763
+ "max": 3400.0
764
+ },
765
+ {
766
+ "current": 2300.0,
767
+ "min": 800.0,
768
+ "max": 3400.0
769
+ },
770
+ {
771
+ "current": 2300.0,
772
+ "min": 800.0,
773
+ "max": 3400.0
774
+ },
775
+ {
776
+ "current": 2300.0,
777
+ "min": 800.0,
778
+ "max": 3400.0
779
+ },
780
+ {
781
+ "current": 2300.0,
782
+ "min": 800.0,
783
+ "max": 3400.0
784
+ },
785
+ {
786
+ "current": 2300.0,
787
+ "min": 800.0,
788
+ "max": 3400.0
789
+ },
790
+ {
791
+ "current": 2300.0,
792
+ "min": 800.0,
793
+ "max": 3400.0
794
+ },
795
+ {
796
+ "current": 2300.0,
797
+ "min": 800.0,
798
+ "max": 3400.0
799
+ },
800
+ {
801
+ "current": 2300.0,
802
+ "min": 800.0,
803
+ "max": 3400.0
804
+ },
805
+ {
806
+ "current": 2300.0,
807
+ "min": 800.0,
808
+ "max": 3400.0
809
+ },
810
+ {
811
+ "current": 2300.0,
812
+ "min": 800.0,
813
+ "max": 3400.0
814
+ },
815
+ {
816
+ "current": 2300.0,
817
+ "min": 800.0,
818
+ "max": 3400.0
819
+ },
820
+ {
821
+ "current": 2300.0,
822
+ "min": 800.0,
823
+ "max": 3400.0
824
+ },
825
+ {
826
+ "current": 2300.0,
827
+ "min": 800.0,
828
+ "max": 3400.0
829
+ },
830
+ {
831
+ "current": 2300.0,
832
+ "min": 800.0,
833
+ "max": 3400.0
834
+ },
835
+ {
836
+ "current": 2300.0,
837
+ "min": 800.0,
838
+ "max": 3400.0
839
+ }
840
+ ],
841
+ "disk": {
842
+ "/": {
843
+ "total": 877.6341285705566,
844
+ "used": 211.6171989440918
845
+ }
846
+ },
847
+ "memory": {
848
+ "total": 1007.4379539489746
849
+ }
850
+ }
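Editorial note (not part of the uploaded files): the "args" array in this wandb-metadata.json records the exact flags the harness was launched with. A hedged sketch of the same evaluation driven from Python is shown below, using argument names as they appear in lm-eval 0.4.x (simple_evaluate is the function the CLI calls, per the traceback earlier in this diff); the --wandb_args logging is handled by the CLI wrapper and is omitted here.

# Sketch only: equivalent programmatic call, reconstructed from the "args" list above.
from lm_eval import evaluator

results = evaluator.simple_evaluate(
    model="hf",
    model_args="pretrained=/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step12000",
    tasks=["hellaswag", "arc_easy", "openbookqa", "winogrande", "sst2", "mrpc"],
    batch_size="auto",
)
# In lm-eval 0.4.x the per-task metrics live under the "results" key.
print(results["results"])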
lm-evaluation-harness/wandb/run-20240608_190333-82mnef5m/files/config.yaml ADDED
@@ -0,0 +1,375 @@
1
+ wandb_version: 1
2
+
3
+ _wandb:
4
+ desc: null
5
+ value:
6
+ python_version: 3.10.12
7
+ cli_version: 0.17.1
8
+ framework: huggingface
9
+ huggingface_version: 4.36.2
10
+ is_jupyter_run: false
11
+ is_kaggle_kernel: false
12
+ start_time: 1717873413
13
+ t:
14
+ 1:
15
+ - 1
16
+ - 5
17
+ - 11
18
+ - 49
19
+ - 51
20
+ - 53
21
+ - 55
22
+ - 71
23
+ - 98
24
+ - 100
25
+ 2:
26
+ - 1
27
+ - 5
28
+ - 11
29
+ - 49
30
+ - 51
31
+ - 53
32
+ - 55
33
+ - 71
34
+ - 98
35
+ - 100
36
+ 3:
37
+ - 2
38
+ - 13
39
+ - 23
40
+ - 62
41
+ 4: 3.10.12
42
+ 5: 0.17.1
43
+ 6: 4.36.2
44
+ 8:
45
+ - 5
46
+ 13: linux-x86_64
47
+ task_configs:
48
+ desc: null
49
+ value:
50
+ arc_easy:
51
+ task: arc_easy
52
+ group:
53
+ - ai2_arc
54
+ dataset_path: allenai/ai2_arc
55
+ dataset_name: ARC-Easy
56
+ training_split: train
57
+ validation_split: validation
58
+ test_split: test
59
+ doc_to_text: 'Question: {{question}}
60
+
61
+ Answer:'
62
+ doc_to_target: '{{choices.label.index(answerKey)}}'
63
+ doc_to_choice: '{{choices.text}}'
64
+ description: ''
65
+ target_delimiter: ' '
66
+ fewshot_delimiter: '
67
+
68
+
69
+ '
70
+ num_fewshot: 0
71
+ metric_list:
72
+ - metric: acc
73
+ aggregation: mean
74
+ higher_is_better: true
75
+ - metric: acc_norm
76
+ aggregation: mean
77
+ higher_is_better: true
78
+ output_type: multiple_choice
79
+ repeats: 1
80
+ should_decontaminate: true
81
+ doc_to_decontamination_query: 'Question: {{question}}
82
+
83
+ Answer:'
84
+ metadata:
85
+ version: 1.0
86
+ boolq:
87
+ task: boolq
88
+ group:
89
+ - super-glue-lm-eval-v1
90
+ dataset_path: super_glue
91
+ dataset_name: boolq
92
+ training_split: train
93
+ validation_split: validation
94
+ doc_to_text: '{{passage}}
95
+
96
+ Question: {{question}}?
97
+
98
+ Answer:'
99
+ doc_to_target: label
100
+ doc_to_choice:
101
+ - 'no'
102
+ - 'yes'
103
+ description: ''
104
+ target_delimiter: ' '
105
+ fewshot_delimiter: '
106
+
107
+
108
+ '
109
+ num_fewshot: 0
110
+ metric_list:
111
+ - metric: acc
112
+ output_type: multiple_choice
113
+ repeats: 1
114
+ should_decontaminate: true
115
+ doc_to_decontamination_query: passage
116
+ metadata:
117
+ version: 2.0
118
+ copa:
119
+ task: copa
120
+ group:
121
+ - super-glue-lm-eval-v1
122
+ dataset_path: super_glue
123
+ dataset_name: copa
124
+ training_split: train
125
+ validation_split: validation
126
+ doc_to_text: "def doc_to_text(doc):\n # Drop the period\n connector =\
127
+ \ {\n \"cause\": \"because\",\n \"effect\": \"therefore\",\n\
128
+ \ }[doc[\"question\"]]\n return doc[\"premise\"].strip()[:-1] + f\"\
129
+ \ {connector}\"\n"
130
+ doc_to_target: "def doc_to_target(doc):\n correct_choice = doc[\"choice1\"\
131
+ ] if doc[\"label\"] == 0 else doc[\"choice2\"]\n # Connect the sentences\n\
132
+ \ return \" \" + convert_choice(correct_choice)\n"
133
+ doc_to_choice: "def doc_to_choice(doc):\n return [\" \" + convert_choice(doc[\"\
134
+ choice1\"]), \" \" + convert_choice(doc[\"choice2\"])]\n"
135
+ description: ''
136
+ target_delimiter: ' '
137
+ fewshot_delimiter: '
138
+
139
+
140
+ '
141
+ num_fewshot: 0
142
+ metric_list:
143
+ - metric: acc
144
+ output_type: multiple_choice
145
+ repeats: 1
146
+ should_decontaminate: false
147
+ metadata:
148
+ version: 1.0
149
+ indic_arc_challenge_hi:
150
+ task: indic_arc_challenge_hi
151
+ group: Cognitive-Lab/Indic-ARC-Challenge
152
+ dataset_path: Cognitive-Lab/Indic-ARC-Challenge
153
+ dataset_name: hi
154
+ test_split: test
155
+ doc_to_text: 'Question: {{translated_question}}
156
+
157
+ Answer:'
158
+ doc_to_target: '{{translated_choices.label.index(answerKey)}}'
159
+ doc_to_choice: '{{translated_choices.text}}'
160
+ description: ''
161
+ target_delimiter: ' '
162
+ fewshot_delimiter: '
163
+
164
+
165
+ '
166
+ num_fewshot: 0
167
+ metric_list:
168
+ - metric: acc
169
+ aggregation: mean
170
+ higher_is_better: true
171
+ output_type: multiple_choice
172
+ repeats: 1
173
+ should_decontaminate: true
174
+ doc_to_decontamination_query: 'Question: {{translated_question}}
175
+
176
+ Answer:'
177
+ metadata:
178
+ version: 1.0
179
+ indic_arc_easy_hi:
180
+ task: indic_arc_easy_hi
181
+ group: Cognitive-Lab/Indic-ARC-Easy
182
+ dataset_path: Cognitive-Lab/Indic-ARC-Easy
183
+ dataset_name: hi
184
+ test_split: test
185
+ doc_to_text: 'Question: {{translated_question}}
186
+
187
+ Answer:'
188
+ doc_to_target: '{{translated_choices.label.index(answerKey)}}'
189
+ doc_to_choice: '{{translated_choices.text}}'
190
+ description: ''
191
+ target_delimiter: ' '
192
+ fewshot_delimiter: '
193
+
194
+
195
+ '
196
+ num_fewshot: 0
197
+ metric_list:
198
+ - metric: acc
199
+ aggregation: mean
200
+ higher_is_better: true
201
+ output_type: multiple_choice
202
+ repeats: 1
203
+ should_decontaminate: true
204
+ doc_to_decontamination_query: 'Question: {{translated_question}}
205
+
206
+ Answer:'
207
+ metadata:
208
+ version: 1.0
209
+ indic_boolq_hi:
210
+ task: indic_boolq_hi
211
+ group: Cognitive-Lab/Indic-BoolQ
212
+ dataset_path: Cognitive-Lab/Indic-BoolQ
213
+ dataset_name: hi
214
+ validation_split: validation
215
+ doc_to_text: 'Passage: {translated_passage}
216
+
217
+ Question: {translated_question.strip()}
218
+
219
+ Answer:'
220
+ doc_to_target: answer
221
+ doc_to_choice:
222
+ - 'true'
223
+ - 'false'
224
+ description: ''
225
+ target_delimiter: ' '
226
+ fewshot_delimiter: '
227
+
228
+
229
+ '
230
+ num_fewshot: 0
231
+ metric_list:
232
+ - metric: acc
233
+ aggregation: mean
234
+ higher_is_better: true
235
+ output_type: multiple_choice
236
+ repeats: 1
237
+ should_decontaminate: false
238
+ metadata:
239
+ version: 1.0
240
+ mrpc:
241
+ task: mrpc
242
+ group: glue
243
+ dataset_path: glue
244
+ dataset_name: mrpc
245
+ training_split: train
246
+ validation_split: validation
247
+ doc_to_text: 'Sentence 1: {{sentence1}}
248
+
249
+ Sentence 2: {{sentence2}}
250
+
251
+ Question: Do both sentences mean the same thing?
252
+
253
+ Answer:'
254
+ doc_to_target: label
255
+ doc_to_choice:
256
+ - 'no'
257
+ - 'yes'
258
+ description: ''
259
+ target_delimiter: ' '
260
+ fewshot_delimiter: '
261
+
262
+
263
+ '
264
+ num_fewshot: 0
265
+ metric_list:
266
+ - metric: acc
267
+ - metric: f1
268
+ output_type: multiple_choice
269
+ repeats: 1
270
+ should_decontaminate: false
271
+ metadata:
272
+ version: 1.0
273
+ piqa:
274
+ task: piqa
275
+ dataset_path: piqa
276
+ training_split: train
277
+ validation_split: validation
278
+ doc_to_text: 'Question: {{goal}}
279
+
280
+ Answer:'
281
+ doc_to_target: label
282
+ doc_to_choice: '{{[sol1, sol2]}}'
283
+ description: ''
284
+ target_delimiter: ' '
285
+ fewshot_delimiter: '
286
+
287
+
288
+ '
289
+ num_fewshot: 0
290
+ metric_list:
291
+ - metric: acc
292
+ aggregation: mean
293
+ higher_is_better: true
294
+ - metric: acc_norm
295
+ aggregation: mean
296
+ higher_is_better: true
297
+ output_type: multiple_choice
298
+ repeats: 1
299
+ should_decontaminate: true
300
+ doc_to_decontamination_query: goal
301
+ metadata:
302
+ version: 1.0
303
+ sst2:
304
+ task: sst2
305
+ group: glue
306
+ dataset_path: glue
307
+ dataset_name: sst2
308
+ training_split: train
309
+ validation_split: validation
310
+ doc_to_text: '{{sentence}}
311
+
312
+ Question: Is this sentence positive or negative?
313
+
314
+ Answer:'
315
+ doc_to_target: label
316
+ doc_to_choice:
317
+ - negative
318
+ - positive
319
+ description: ''
320
+ target_delimiter: ' '
321
+ fewshot_delimiter: '
322
+
323
+
324
+ '
325
+ num_fewshot: 0
326
+ metric_list:
327
+ - metric: acc
328
+ output_type: multiple_choice
329
+ repeats: 1
330
+ should_decontaminate: false
331
+ metadata:
332
+ version: 1.0
333
+ winogrande:
334
+ task: winogrande
335
+ dataset_path: winogrande
336
+ dataset_name: winogrande_xl
337
+ training_split: train
338
+ validation_split: validation
339
+ doc_to_text: "def doc_to_text(doc):\n answer_to_num = {\"1\": 0, \"2\": 1}\n\
340
+ \ return answer_to_num[doc[\"answer\"]]\n"
341
+ doc_to_target: "def doc_to_target(doc):\n idx = doc[\"sentence\"].index(\"\
342
+ _\") + 1\n return doc[\"sentence\"][idx:].strip()\n"
343
+ doc_to_choice: "def doc_to_choice(doc):\n idx = doc[\"sentence\"].index(\"\
344
+ _\")\n options = [doc[\"option1\"], doc[\"option2\"]]\n return [doc[\"\
345
+ sentence\"][:idx] + opt for opt in options]\n"
346
+ description: ''
347
+ target_delimiter: ' '
348
+ fewshot_delimiter: '
349
+
350
+
351
+ '
352
+ num_fewshot: 0
353
+ metric_list:
354
+ - metric: acc
355
+ aggregation: mean
356
+ higher_is_better: true
357
+ output_type: multiple_choice
358
+ repeats: 1
359
+ should_decontaminate: true
360
+ doc_to_decontamination_query: sentence
361
+ metadata:
362
+ version: 1.0
363
+ cli_configs:
364
+ desc: null
365
+ value:
366
+ model: hf
367
+ model_args: pretrained=/mnt/weka/peacock/experiments/llama/eval/checkpoint-enhibn-updated/llamav2-3b/hf/global_step240000,tokenizer=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k_hf/ConvertedTokenizer
368
+ batch_size: auto
369
+ batch_sizes:
370
+ - 64
371
+ device: null
372
+ use_cache: null
373
+ limit: null
374
+ bootstrap_iters: 100000
375
+ gen_kwargs: null
lm-evaluation-harness/wandb/run-20240608_190333-82mnef5m/files/media/table/evaluation/eval_results_1_fd1718bec4834f9c9150.table.json ADDED
@@ -0,0 +1 @@
1
+ {"columns": ["Tasks", "Version", "Filter", "num_fewshot", "Metric", "Value", "Stderr"], "data": [["winogrande", 1.0, "none", 0, "acc", "0.5035516969218626", "0.0141"], ["sst2", 1.0, "none", 0, "acc", "0.518348623853211", "0.0169"], ["piqa", 1.0, "none", 0, "acc", "0.5174102285092492", "0.0117"], ["piqa", 1.0, "none", 0, "acc_norm", "0.5048966267682263", "0.0117"], ["mrpc", 1.0, "none", 0, "acc", "0.3161764705882353", "0.0230"], ["mrpc", 1.0, "none", 0, "f1", "0.0", "0.0000"], ["indic_boolq_hi", 1.0, "none", 0, "acc", "0.6217125382262997", "0.0085"], ["indic_arc_easy_hi", 1.0, "none", 0, "acc", "0.2398989898989899", "0.0088"], ["indic_arc_challenge_hi", 1.0, "none", 0, "acc", "0.20819112627986347", "0.0119"], ["copa", 1.0, "none", 0, "acc", "0.6", "0.0492"], ["boolq", 2.0, "none", 0, "acc", "0.3782874617737003", "0.0085"], ["arc_easy", 1.0, "none", 0, "acc", "0.26346801346801346", "0.0090"], ["arc_easy", 1.0, "none", 0, "acc_norm", "0.2647306397306397", "0.0091"]]}
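Editorial note (not part of the uploaded files): this wandb table artifact stores the per-task results as parallel "columns" and "data" lists, with the numeric values serialized as strings. A short sketch of loading it for analysis is shown below; it assumes pandas is available and that the JSON is read from this artifact's filename.

import json
import pandas as pd

# Load the logged results table (adjust the path to where the artifact lives locally).
with open("eval_results_1_fd1718bec4834f9c9150.table.json") as f:
    table = json.load(f)

# Rebuild the table from the parallel "columns"/"data" lists.
df = pd.DataFrame(table["data"], columns=table["columns"])
# Values are serialized as strings in this artifact; cast the metric values for sorting.
df["Value"] = df["Value"].astype(float)
print(df.sort_values("Value", ascending=False)[["Tasks", "Metric", "Value", "Stderr"]])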
lm-evaluation-harness/wandb/run-20240608_190333-82mnef5m/files/output.log ADDED
@@ -0,0 +1,744 @@
1
+
2
+ 2024-06-08:19:03:34,356 INFO [__main__.py:251] Verbosity set to INFO
3
+ 2024-06-08:19:03:43,617 INFO [__main__.py:335] Selected Tasks: ['arc_easy', 'boolq', 'copa', 'indic_arc_challenge_hi', 'indic_arc_easy_hi', 'indic_boolq_hi', 'mrpc', 'piqa', 'sst2', 'winogrande']
4
+ 2024-06-08:19:03:43,618 INFO [evaluator.py:131] Setting random seed to 0 | Setting numpy seed to 1234 | Setting torch manual seed to 1234
5
+ 2024-06-08:19:03:43,618 INFO [evaluator.py:177] Initializing hf model, with arguments: {'pretrained': '/mnt/weka/peacock/experiments/llama/eval/checkpoint-enhibn-updated/llamav2-3b/hf/global_step240000', 'tokenizer': '/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k_hf/ConvertedTokenizer'}
6
+ 2024-06-08:19:03:45,961 INFO [huggingface.py:164] Using device 'cuda'
7
+ /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/gpu_migration/torch/cuda/memory.py:36: UserWarning: No need to call empty_cache on HPU. It manages the memory internally in an effcient way.
8
+ warnings.warn(
9
+ Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
10
+ 2024-06-08:19:04:20,128 WARNING [task.py:763] [Task: boolq] metric acc is defined, but aggregation is not. using default aggregation=mean
11
+ 2024-06-08:19:04:20,129 WARNING [task.py:775] [Task: boolq] metric acc is defined, but higher_is_better is not. using default higher_is_better=True
12
+ /usr/local/lib/python3.10/dist-packages/datasets/load.py:1491: FutureWarning: The repository for super_glue contains custom code which must be executed to correctly load the dataset. You can inspect the repository content at https://hf.co/datasets/super_glue
13
+ You can avoid this message in future by passing the argument `trust_remote_code=True`.
14
+ Passing `trust_remote_code=True` will be mandatory to load this dataset from the next major release of `datasets`.
15
+ warnings.warn(
16
+ 2024-06-08:19:04:21,896 WARNING [task.py:763] [Task: copa] metric acc is defined, but aggregation is not. using default aggregation=mean
17
+ 2024-06-08:19:04:21,897 WARNING [task.py:775] [Task: copa] metric acc is defined, but higher_is_better is not. using default higher_is_better=True
18
+ 2024-06-08:19:04:25,087 WARNING [task.py:322] [Task: indic_arc_challenge_hi] has_training_docs and has_validation_docs are False, using test_docs as fewshot_docs but this is not recommended.
19
+ 2024-06-08:19:04:25,088 WARNING [task.py:322] [Task: indic_arc_challenge_hi] has_training_docs and has_validation_docs are False, using test_docs as fewshot_docs but this is not recommended.
20
+ 2024-06-08:19:04:26,859 WARNING [task.py:322] [Task: indic_arc_easy_hi] has_training_docs and has_validation_docs are False, using test_docs as fewshot_docs but this is not recommended.
21
+ 2024-06-08:19:04:26,859 WARNING [task.py:322] [Task: indic_arc_easy_hi] has_training_docs and has_validation_docs are False, using test_docs as fewshot_docs but this is not recommended.
22
+ 2024-06-08:19:04:28,760 WARNING [task.py:763] [Task: mrpc] metric acc is defined, but aggregation is not. using default aggregation=mean
23
+ 2024-06-08:19:04:28,760 WARNING [task.py:775] [Task: mrpc] metric acc is defined, but higher_is_better is not. using default higher_is_better=True
24
+ 2024-06-08:19:04:28,761 WARNING [task.py:763] [Task: mrpc] metric f1 is defined, but aggregation is not. using default aggregation=f1
25
+ 2024-06-08:19:04:28,761 WARNING [task.py:775] [Task: mrpc] metric f1 is defined, but higher_is_better is not. using default higher_is_better=True
26
+ /usr/local/lib/python3.10/dist-packages/datasets/load.py:1491: FutureWarning: The repository for piqa contains custom code which must be executed to correctly load the dataset. You can inspect the repository content at https://hf.co/datasets/piqa
27
+ You can avoid this message in future by passing the argument `trust_remote_code=True`.
28
+ Passing `trust_remote_code=True` will be mandatory to load this dataset from the next major release of `datasets`.
29
+ warnings.warn(
30
+ 2024-06-08:19:04:34,484 WARNING [task.py:763] [Task: sst2] metric acc is defined, but aggregation is not. using default aggregation=mean
31
+ 2024-06-08:19:04:34,484 WARNING [task.py:775] [Task: sst2] metric acc is defined, but higher_is_better is not. using default higher_is_better=True
32
+ /usr/local/lib/python3.10/dist-packages/datasets/load.py:1491: FutureWarning: The repository for winogrande contains custom code which must be executed to correctly load the dataset. You can inspect the repository content at https://hf.co/datasets/winogrande
33
+ You can avoid this message in future by passing the argument `trust_remote_code=True`.
34
+ Passing `trust_remote_code=True` will be mandatory to load this dataset from the next major release of `datasets`.
35
+ warnings.warn(
36
+ 2024-06-08:19:04:42,011 INFO [task.py:395] Building contexts for winogrande on rank 0...
37
+ 100%|██████████| 1267/1267 [00:00<00:00, 69176.18it/s]
38
+ 2024-06-08:19:04:42,102 INFO [task.py:395] Building contexts for sst2 on rank 0...
39
+ 100%|██████████| 872/872 [00:00<00:00, 2597.98it/s]
40
+ 2024-06-08:19:04:42,466 INFO [task.py:395] Building contexts for piqa on rank 0...
41
+ 100%|██████████| 1838/1838 [00:01<00:00, 1102.82it/s]
42
+ 2024-06-08:19:04:44,209 INFO [task.py:395] Building contexts for mrpc on rank 0...
43
+ 100%|██████████| 408/408 [00:00<00:00, 1882.89it/s]
44
+ 2024-06-08:19:04:44,444 INFO [task.py:395] Building contexts for indic_boolq_hi on rank 0...
45
+ 100%|██████████| 3270/3270 [00:01<00:00, 3060.92it/s]
46
+ 2024-06-08:19:04:45,684 INFO [task.py:395] Building contexts for indic_arc_easy_hi on rank 0...
47
+ 100%|██████████| 2376/2376 [00:02<00:00, 1140.22it/s]
48
+ 2024-06-08:19:04:48,009 INFO [task.py:395] Building contexts for indic_arc_challenge_hi on rank 0...
49
+ 100%|██████████| 1172/1172 [00:01<00:00, 1138.40it/s]
50
+ 2024-06-08:19:04:49,159 INFO [task.py:395] Building contexts for copa on rank 0...
51
+ 100%|██████████| 100/100 [00:00<00:00, 62657.66it/s]
52
+ 2024-06-08:19:04:49,168 INFO [task.py:395] Building contexts for boolq on rank 0...
53
+ 100%|██████████| 3270/3270 [00:01<00:00, 2005.31it/s]
54
+ 2024-06-08:19:04:50,927 INFO [task.py:395] Building contexts for arc_easy on rank 0...
55
+ 100%|██████████| 2376/2376 [00:02<00:00, 1061.27it/s]
56
+ 2024-06-08:19:04:53,311 INFO [evaluator.py:379] Running loglikelihood requests
57
+ Token indices sequence length is longer than the specified maximum sequence length for this model (1333 > 1024). Running this sequence through the model will result in indexing errors
58
+ Running loglikelihood requests: 0%| | 0/45739 [00:00<?, ?it/s]
59
+ Passed argument batch_size = auto:1. Detecting largest batch size
+ Running loglikelihood requests: 100%|██████████| 45739/45739 [1:07:44<00:00, 11.25it/s]
+ 100%|██████████| 100/100 [02:10<00:00, 1.30s/it]
+ hf (pretrained=/mnt/weka/peacock/experiments/llama/eval/checkpoint-enhibn-updated/llamav2-3b/hf/global_step240000,tokenizer=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k_hf/ConvertedTokenizer), gen_kwargs: (None), limit: None, num_fewshot: None, batch_size: auto (64)
+ | Tasks |Version|Filter|n-shot| Metric |Value | |Stderr|
+ |----------------------|------:|------|-----:|--------|-----:|---|-----:|
+ |winogrande | 1|none | 0|acc |0.5036|± |0.0141|
+ |sst2 | 1|none | 0|acc |0.5183|± |0.0169|
+ |piqa | 1|none | 0|acc |0.5174|± |0.0117|
+ | | |none | 0|acc_norm|0.5049|± |0.0117|
+ |mrpc | 1|none | 0|acc |0.3162|± |0.0230|
+ | | |none | 0|f1 |0.0000|± |0.0000|
+ |indic_boolq_hi | 1|none | 0|acc |0.6217|± |0.0085|
+ |indic_arc_easy_hi | 1|none | 0|acc |0.2399|± |0.0088|
+ |indic_arc_challenge_hi| 1|none | 0|acc |0.2082|± |0.0119|
+ |copa | 1|none | 0|acc |0.6000|± |0.0492|
+ |boolq | 2|none | 0|acc |0.3783|± |0.0085|
+ |arc_easy | 1|none | 0|acc |0.2635|± |0.0090|
+ | | |none | 0|acc_norm|0.2647|± |0.0091|
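For reference, a table like the one above can also be produced from Python rather than the `python -m lm_eval` invocation recorded in the run metadata further down. The snippet below is a minimal sketch, not the exact command used for this run: it assumes lm-eval 0.4.2 (as pinned in the requirements list that follows), that the checkpoint and tokenizer paths from this run are reachable, and that the indic_* tasks are provided by this harness checkout's custom task definitions.

```python
# Minimal sketch (assumptions noted above): re-run the same zero-shot suite
# with the lm-eval 0.4.x Python API instead of the CLI.
import lm_eval

results = lm_eval.simple_evaluate(
    model="hf",
    model_args=(
        "pretrained=/mnt/weka/peacock/experiments/llama/eval/"
        "checkpoint-enhibn-updated/llamav2-3b/hf/global_step240000,"
        "tokenizer=/mnt/weka/peacock/tokenization/trained-tokenizer/"
        "enhiben_50k_hf/ConvertedTokenizer"
    ),
    tasks=[
        "winogrande", "sst2", "mrpc", "arc_easy", "copa", "piqa", "boolq",
        "indic_arc_easy_hi", "indic_arc_challenge_hi", "indic_boolq_hi",
    ],
    batch_size="auto",
)

# results["results"] maps each task name to its metrics (acc, acc_norm, f1, stderr).
for task, metrics in sorted(results["results"].items()):
    print(task, metrics)
```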
lm-evaluation-harness/wandb/run-20240608_190333-82mnef5m/files/requirements.txt ADDED
@@ -0,0 +1,154 @@
+ DataProperty==1.0.1
+ GitPython==3.1.43
+ Jinja2==3.1.4
+ Markdown==3.6
+ MarkupSafe==2.1.5
+ Pillow-SIMD==7.0.0.post3
+ PyYAML==6.0
+ Werkzeug==3.0.3
+ absl-py==2.1.0
+ accelerate==0.31.0
+ aiohttp==3.9.5
+ aiosignal==1.3.1
+ async-timeout==4.0.3
+ attrs==23.2.0
+ av==9.2.0
+ cachetools==5.3.3
+ certifi==2024.2.2
+ cffi==1.15.1
+ cfgv==3.4.0
+ chardet==5.2.0
+ charset-normalizer==3.3.2
+ click==8.1.7
+ cmake==3.29.2
+ colorama==0.4.6
+ datasets==2.19.2
+ deepspeed==0.12.4+hpu.synapse.v1.15.1
+ dill==0.3.8
+ distlib==0.3.8
+ docker-pycreds==0.4.0
+ einops==0.8.0
+ evaluate==0.4.2
+ exceptiongroup==1.2.1
+ expecttest==0.2.1
+ filelock==3.14.0
+ frozenlist==1.4.1
+ fsspec==2024.3.1
+ gitdb==4.0.11
+ google-auth-oauthlib==0.4.6
+ google-auth==2.29.0
+ grpcio==1.63.0
+ habana-media-loader==1.15.1.15
+ habana-pyhlml==1.15.1.15
+ habana-torch-dataloader==1.15.1.15
+ habana-torch-plugin==1.15.1.15
+ habana_gpu_migration==1.15.1.15
+ habana_quantization_toolkit==1.15.1.15
+ hjson==3.1.0
+ huggingface-hub==0.23.3
+ identify==2.5.36
+ idna==3.7
+ iniconfig==2.0.0
+ joblib==1.4.2
+ jsonlines==4.0.0
+ lightning-habana==1.4.0
+ lightning-utilities==0.11.2
+ lightning==2.2.0.post0
+ lm_eval==0.4.2
+ lm_eval==0.4.2
+ lm_eval==0.4.2
+ lxml==5.2.2
+ mbstrdecoder==1.1.3
+ more-itertools==10.2.0
+ mpi4py==3.1.4
+ mpmath==1.3.0
+ multidict==6.0.5
+ multiprocess==0.70.16
+ networkx==3.3
+ ninja==1.11.1.1
+ nltk==3.8.1
+ nodeenv==1.8.0
+ numexpr==2.10.0
+ numpy==1.23.5
+ oauthlib==3.2.2
+ packaging==24.0
+ pandas==2.0.1
+ pathspec==0.12.1
+ pathvalidate==3.2.0
+ peft==0.11.1
+ perfetto==0.7.0
+ pip==22.0.2
+ pip==23.3.1
+ platformdirs==4.2.1
+ pluggy==1.5.0
+ portalocker==2.8.2
+ pre-commit==3.3.3
+ pretty-errors==1.2.25
+ protobuf==3.20.3
+ psutil==5.9.8
+ py-cpuinfo==9.0.0
+ pyarrow-hotfix==0.6
+ pyarrow==16.1.0
+ pyasn1==0.6.0
+ pyasn1_modules==0.4.0
+ pybind11==2.10.4
+ pycparser==2.22
+ pydantic==1.10.13
+ pynvml==8.0.4
+ pytablewriter==1.2.0
+ pytest==8.2.0
+ python-dateutil==2.9.0.post0
+ pytorch-lightning==2.2.4
+ pytz==2024.1
+ regex==2023.5.5
+ requests-oauthlib==2.0.0
+ requests==2.32.3
+ rouge_score==0.1.2
+ rsa==4.9
+ sacrebleu==2.4.2
+ safetensors==0.4.3
+ scikit-learn==1.5.0
+ scipy==1.13.1
+ sentencepiece==0.2.0
+ sentry-sdk==2.5.1
+ setproctitle==1.3.3
+ setuptools==59.6.0
+ setuptools==69.5.1
+ six==1.16.0
+ smmap==5.0.1
+ sqlitedict==2.1.0
+ symengine==0.11.0
+ sympy==1.12
+ tabledata==1.3.3
+ tabulate==0.9.0
+ tcolorpy==0.1.6
+ tdqm==0.0.1
+ tensorboard-data-server==0.6.1
+ tensorboard-plugin-wit==1.8.1
+ tensorboard==2.11.2
+ threadpoolctl==3.5.0
+ tokenizers==0.15.2
+ tomli==2.0.1
+ torch==2.2.0a0+git8964477
+ torch_tb_profiler==0.4.0
+ torchaudio==2.2.0+08901ad
+ torchdata==0.7.1+5e6f7b7
+ torchmetrics==1.4.0
+ torchtext==0.17.0+400da5c
+ torchvision==0.17.0+b2383d4
+ tqdm-multiprocess==0.0.11
+ tqdm==4.66.4
+ transformers==4.36.2
+ typepy==1.3.2
+ typing_extensions==4.11.0
+ tzdata==2024.1
+ urllib3==1.26.18
+ virtualenv==20.26.1
+ wandb==0.17.1
+ wheel==0.37.1
+ wheel==0.43.0
+ word2number==1.1
+ xxhash==3.4.1
+ yamllint==1.35.1
+ yarl==1.9.4
+ zstandard==0.22.0
lm-evaluation-harness/wandb/run-20240608_190333-82mnef5m/files/wandb-metadata.json ADDED
@@ -0,0 +1,850 @@
1
+ {
2
+ "os": "Linux-5.15.0-92-generic-x86_64-with-glibc2.35",
3
+ "python": "3.10.12",
4
+ "heartbeatAt": "2024-06-08T19:03:34.154505",
5
+ "startedAt": "2024-06-08T19:03:33.753786",
6
+ "docker": null,
7
+ "cuda": null,
8
+ "args": [
9
+ "--model",
10
+ "hf",
11
+ "--model_args",
12
+ "pretrained=/mnt/weka/peacock/experiments/llama/eval/checkpoint-enhibn-updated/llamav2-3b/hf/global_step240000,tokenizer=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k_hf/ConvertedTokenizer",
13
+ "--tasks",
14
+ "winogrande,sst2,mrpc,arc_easy,copa,piqa,boolq,indic_arc_easy_hi,indic_arc_challenge_hi,indic_boolq_hi",
15
+ "--batch_size",
16
+ "auto",
17
+ "--wandb_args",
18
+ "project=engl-hi-eval,name=globalstep_240000,group=exp2"
19
+ ],
20
+ "state": "running",
21
+ "program": "-m lm_eval.__main__",
22
+ "codePathLocal": null,
23
+ "git": {
24
+ "remote": "https://github.com/EleutherAI/lm-evaluation-harness",
25
+ "commit": null
26
+ },
27
+ "email": null,
28
+ "root": "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness",
29
+ "host": "peacock-evaluation-debug-worker-0",
30
+ "username": "root",
31
+ "executable": "/usr/bin/python3",
32
+ "cpu_count": 80,
33
+ "cpu_count_logical": 160,
34
+ "cpu_freq": {
35
+ "current": 2327.499975,
36
+ "min": 800.0,
37
+ "max": 3400.0
38
+ },
39
+ "cpu_freq_per_core": [
40
+ {
41
+ "current": 3311.804,
42
+ "min": 800.0,
43
+ "max": 3400.0
44
+ },
45
+ {
46
+ "current": 3311.816,
47
+ "min": 800.0,
48
+ "max": 3400.0
49
+ },
50
+ {
51
+ "current": 2300.0,
52
+ "min": 800.0,
53
+ "max": 3400.0
54
+ },
55
+ {
56
+ "current": 2300.0,
57
+ "min": 800.0,
58
+ "max": 3400.0
59
+ },
60
+ {
61
+ "current": 2300.0,
62
+ "min": 800.0,
63
+ "max": 3400.0
64
+ },
65
+ {
66
+ "current": 2300.0,
67
+ "min": 800.0,
68
+ "max": 3400.0
69
+ },
70
+ {
71
+ "current": 2300.0,
72
+ "min": 800.0,
73
+ "max": 3400.0
74
+ },
75
+ {
76
+ "current": 2300.0,
77
+ "min": 800.0,
78
+ "max": 3400.0
79
+ },
80
+ {
81
+ "current": 2300.0,
82
+ "min": 800.0,
83
+ "max": 3400.0
84
+ },
85
+ {
86
+ "current": 2300.0,
87
+ "min": 800.0,
88
+ "max": 3400.0
89
+ },
90
+ {
91
+ "current": 2300.0,
92
+ "min": 800.0,
93
+ "max": 3400.0
94
+ },
95
+ {
96
+ "current": 2300.0,
97
+ "min": 800.0,
98
+ "max": 3400.0
99
+ },
100
+ {
101
+ "current": 2300.0,
102
+ "min": 800.0,
103
+ "max": 3400.0
104
+ },
105
+ {
106
+ "current": 2300.0,
107
+ "min": 800.0,
108
+ "max": 3400.0
109
+ },
110
+ {
111
+ "current": 2300.0,
112
+ "min": 800.0,
113
+ "max": 3400.0
114
+ },
115
+ {
116
+ "current": 2300.0,
117
+ "min": 800.0,
118
+ "max": 3400.0
119
+ },
120
+ {
121
+ "current": 2300.0,
122
+ "min": 800.0,
123
+ "max": 3400.0
124
+ },
125
+ {
126
+ "current": 2300.0,
127
+ "min": 800.0,
128
+ "max": 3400.0
129
+ },
130
+ {
131
+ "current": 2300.0,
132
+ "min": 800.0,
133
+ "max": 3400.0
134
+ },
135
+ {
136
+ "current": 2300.0,
137
+ "min": 800.0,
138
+ "max": 3400.0
139
+ },
140
+ {
141
+ "current": 2300.0,
142
+ "min": 800.0,
143
+ "max": 3400.0
144
+ },
145
+ {
146
+ "current": 2300.0,
147
+ "min": 800.0,
148
+ "max": 3400.0
149
+ },
150
+ {
151
+ "current": 2300.0,
152
+ "min": 800.0,
153
+ "max": 3400.0
154
+ },
155
+ {
156
+ "current": 2300.0,
157
+ "min": 800.0,
158
+ "max": 3400.0
159
+ },
160
+ {
161
+ "current": 2300.0,
162
+ "min": 800.0,
163
+ "max": 3400.0
164
+ },
165
+ {
166
+ "current": 2300.0,
167
+ "min": 800.0,
168
+ "max": 3400.0
169
+ },
170
+ {
171
+ "current": 2300.0,
172
+ "min": 800.0,
173
+ "max": 3400.0
174
+ },
175
+ {
176
+ "current": 2300.0,
177
+ "min": 800.0,
178
+ "max": 3400.0
179
+ },
180
+ {
181
+ "current": 2300.0,
182
+ "min": 800.0,
183
+ "max": 3400.0
184
+ },
185
+ {
186
+ "current": 2300.0,
187
+ "min": 800.0,
188
+ "max": 3400.0
189
+ },
190
+ {
191
+ "current": 2300.0,
192
+ "min": 800.0,
193
+ "max": 3400.0
194
+ },
195
+ {
196
+ "current": 2300.0,
197
+ "min": 800.0,
198
+ "max": 3400.0
199
+ },
200
+ {
201
+ "current": 2300.0,
202
+ "min": 800.0,
203
+ "max": 3400.0
204
+ },
205
+ {
206
+ "current": 2300.0,
207
+ "min": 800.0,
208
+ "max": 3400.0
209
+ },
210
+ {
211
+ "current": 2300.0,
212
+ "min": 800.0,
213
+ "max": 3400.0
214
+ },
215
+ {
216
+ "current": 2300.0,
217
+ "min": 800.0,
218
+ "max": 3400.0
219
+ },
220
+ {
221
+ "current": 2300.0,
222
+ "min": 800.0,
223
+ "max": 3400.0
224
+ },
225
+ {
226
+ "current": 2300.0,
227
+ "min": 800.0,
228
+ "max": 3400.0
229
+ },
230
+ {
231
+ "current": 2300.0,
232
+ "min": 800.0,
233
+ "max": 3400.0
234
+ },
235
+ {
236
+ "current": 2300.0,
237
+ "min": 800.0,
238
+ "max": 3400.0
239
+ },
240
+ {
241
+ "current": 3299.996,
242
+ "min": 800.0,
243
+ "max": 3400.0
244
+ },
245
+ {
246
+ "current": 2300.0,
247
+ "min": 800.0,
248
+ "max": 3400.0
249
+ },
250
+ {
251
+ "current": 2300.0,
252
+ "min": 800.0,
253
+ "max": 3400.0
254
+ },
255
+ {
256
+ "current": 2300.0,
257
+ "min": 800.0,
258
+ "max": 3400.0
259
+ },
260
+ {
261
+ "current": 2300.0,
262
+ "min": 800.0,
263
+ "max": 3400.0
264
+ },
265
+ {
266
+ "current": 2300.0,
267
+ "min": 800.0,
268
+ "max": 3400.0
269
+ },
270
+ {
271
+ "current": 2300.0,
272
+ "min": 800.0,
273
+ "max": 3400.0
274
+ },
275
+ {
276
+ "current": 2300.0,
277
+ "min": 800.0,
278
+ "max": 3400.0
279
+ },
280
+ {
281
+ "current": 2300.0,
282
+ "min": 800.0,
283
+ "max": 3400.0
284
+ },
285
+ {
286
+ "current": 2300.0,
287
+ "min": 800.0,
288
+ "max": 3400.0
289
+ },
290
+ {
291
+ "current": 2300.0,
292
+ "min": 800.0,
293
+ "max": 3400.0
294
+ },
295
+ {
296
+ "current": 2300.0,
297
+ "min": 800.0,
298
+ "max": 3400.0
299
+ },
300
+ {
301
+ "current": 2300.0,
302
+ "min": 800.0,
303
+ "max": 3400.0
304
+ },
305
+ {
306
+ "current": 2300.0,
307
+ "min": 800.0,
308
+ "max": 3400.0
309
+ },
310
+ {
311
+ "current": 2300.0,
312
+ "min": 800.0,
313
+ "max": 3400.0
314
+ },
315
+ {
316
+ "current": 2300.0,
317
+ "min": 800.0,
318
+ "max": 3400.0
319
+ },
320
+ {
321
+ "current": 2300.0,
322
+ "min": 800.0,
323
+ "max": 3400.0
324
+ },
325
+ {
326
+ "current": 2300.0,
327
+ "min": 800.0,
328
+ "max": 3400.0
329
+ },
330
+ {
331
+ "current": 2300.0,
332
+ "min": 800.0,
333
+ "max": 3400.0
334
+ },
335
+ {
336
+ "current": 2300.0,
337
+ "min": 800.0,
338
+ "max": 3400.0
339
+ },
340
+ {
341
+ "current": 2300.0,
342
+ "min": 800.0,
343
+ "max": 3400.0
344
+ },
345
+ {
346
+ "current": 2300.0,
347
+ "min": 800.0,
348
+ "max": 3400.0
349
+ },
350
+ {
351
+ "current": 2300.0,
352
+ "min": 800.0,
353
+ "max": 3400.0
354
+ },
355
+ {
356
+ "current": 2300.0,
357
+ "min": 800.0,
358
+ "max": 3400.0
359
+ },
360
+ {
361
+ "current": 2300.0,
362
+ "min": 800.0,
363
+ "max": 3400.0
364
+ },
365
+ {
366
+ "current": 2300.0,
367
+ "min": 800.0,
368
+ "max": 3400.0
369
+ },
370
+ {
371
+ "current": 2300.0,
372
+ "min": 800.0,
373
+ "max": 3400.0
374
+ },
375
+ {
376
+ "current": 2300.0,
377
+ "min": 800.0,
378
+ "max": 3400.0
379
+ },
380
+ {
381
+ "current": 2300.0,
382
+ "min": 800.0,
383
+ "max": 3400.0
384
+ },
385
+ {
386
+ "current": 2300.0,
387
+ "min": 800.0,
388
+ "max": 3400.0
389
+ },
390
+ {
391
+ "current": 2300.0,
392
+ "min": 800.0,
393
+ "max": 3400.0
394
+ },
395
+ {
396
+ "current": 2300.0,
397
+ "min": 800.0,
398
+ "max": 3400.0
399
+ },
400
+ {
401
+ "current": 2300.0,
402
+ "min": 800.0,
403
+ "max": 3400.0
404
+ },
405
+ {
406
+ "current": 2300.0,
407
+ "min": 800.0,
408
+ "max": 3400.0
409
+ },
410
+ {
411
+ "current": 2300.0,
412
+ "min": 800.0,
413
+ "max": 3400.0
414
+ },
415
+ {
416
+ "current": 2300.0,
417
+ "min": 800.0,
418
+ "max": 3400.0
419
+ },
420
+ {
421
+ "current": 2300.0,
422
+ "min": 800.0,
423
+ "max": 3400.0
424
+ },
425
+ {
426
+ "current": 2300.0,
427
+ "min": 800.0,
428
+ "max": 3400.0
429
+ },
430
+ {
431
+ "current": 2300.0,
432
+ "min": 800.0,
433
+ "max": 3400.0
434
+ },
435
+ {
436
+ "current": 2300.0,
437
+ "min": 800.0,
438
+ "max": 3400.0
439
+ },
440
+ {
441
+ "current": 3311.792,
442
+ "min": 800.0,
443
+ "max": 3400.0
444
+ },
445
+ {
446
+ "current": 2300.0,
447
+ "min": 800.0,
448
+ "max": 3400.0
449
+ },
450
+ {
451
+ "current": 2300.0,
452
+ "min": 800.0,
453
+ "max": 3400.0
454
+ },
455
+ {
456
+ "current": 2300.0,
457
+ "min": 800.0,
458
+ "max": 3400.0
459
+ },
460
+ {
461
+ "current": 2300.0,
462
+ "min": 800.0,
463
+ "max": 3400.0
464
+ },
465
+ {
466
+ "current": 2300.0,
467
+ "min": 800.0,
468
+ "max": 3400.0
469
+ },
470
+ {
471
+ "current": 2300.0,
472
+ "min": 800.0,
473
+ "max": 3400.0
474
+ },
475
+ {
476
+ "current": 2300.0,
477
+ "min": 800.0,
478
+ "max": 3400.0
479
+ },
480
+ {
481
+ "current": 2300.0,
482
+ "min": 800.0,
483
+ "max": 3400.0
484
+ },
485
+ {
486
+ "current": 2300.0,
487
+ "min": 800.0,
488
+ "max": 3400.0
489
+ },
490
+ {
491
+ "current": 2300.0,
492
+ "min": 800.0,
493
+ "max": 3400.0
494
+ },
495
+ {
496
+ "current": 2300.0,
497
+ "min": 800.0,
498
+ "max": 3400.0
499
+ },
500
+ {
501
+ "current": 2300.0,
502
+ "min": 800.0,
503
+ "max": 3400.0
504
+ },
505
+ {
506
+ "current": 2300.0,
507
+ "min": 800.0,
508
+ "max": 3400.0
509
+ },
510
+ {
511
+ "current": 2300.0,
512
+ "min": 800.0,
513
+ "max": 3400.0
514
+ },
515
+ {
516
+ "current": 2300.0,
517
+ "min": 800.0,
518
+ "max": 3400.0
519
+ },
520
+ {
521
+ "current": 2300.0,
522
+ "min": 800.0,
523
+ "max": 3400.0
524
+ },
525
+ {
526
+ "current": 2300.0,
527
+ "min": 800.0,
528
+ "max": 3400.0
529
+ },
530
+ {
531
+ "current": 2300.0,
532
+ "min": 800.0,
533
+ "max": 3400.0
534
+ },
535
+ {
536
+ "current": 2300.0,
537
+ "min": 800.0,
538
+ "max": 3400.0
539
+ },
540
+ {
541
+ "current": 2300.0,
542
+ "min": 800.0,
543
+ "max": 3400.0
544
+ },
545
+ {
546
+ "current": 2300.0,
547
+ "min": 800.0,
548
+ "max": 3400.0
549
+ },
550
+ {
551
+ "current": 2300.0,
552
+ "min": 800.0,
553
+ "max": 3400.0
554
+ },
555
+ {
556
+ "current": 2300.0,
557
+ "min": 800.0,
558
+ "max": 3400.0
559
+ },
560
+ {
561
+ "current": 2300.0,
562
+ "min": 800.0,
563
+ "max": 3400.0
564
+ },
565
+ {
566
+ "current": 2300.0,
567
+ "min": 800.0,
568
+ "max": 3400.0
569
+ },
570
+ {
571
+ "current": 2300.0,
572
+ "min": 800.0,
573
+ "max": 3400.0
574
+ },
575
+ {
576
+ "current": 2300.0,
577
+ "min": 800.0,
578
+ "max": 3400.0
579
+ },
580
+ {
581
+ "current": 2300.0,
582
+ "min": 800.0,
583
+ "max": 3400.0
584
+ },
585
+ {
586
+ "current": 2300.0,
587
+ "min": 800.0,
588
+ "max": 3400.0
589
+ },
590
+ {
591
+ "current": 2300.0,
592
+ "min": 800.0,
593
+ "max": 3400.0
594
+ },
595
+ {
596
+ "current": 2300.0,
597
+ "min": 800.0,
598
+ "max": 3400.0
599
+ },
600
+ {
601
+ "current": 2300.0,
602
+ "min": 800.0,
603
+ "max": 3400.0
604
+ },
605
+ {
606
+ "current": 2300.0,
607
+ "min": 800.0,
608
+ "max": 3400.0
609
+ },
610
+ {
611
+ "current": 2300.0,
612
+ "min": 800.0,
613
+ "max": 3400.0
614
+ },
615
+ {
616
+ "current": 2300.0,
617
+ "min": 800.0,
618
+ "max": 3400.0
619
+ },
620
+ {
621
+ "current": 2300.0,
622
+ "min": 800.0,
623
+ "max": 3400.0
624
+ },
625
+ {
626
+ "current": 2300.0,
627
+ "min": 800.0,
628
+ "max": 3400.0
629
+ },
630
+ {
631
+ "current": 2300.0,
632
+ "min": 800.0,
633
+ "max": 3400.0
634
+ },
635
+ {
636
+ "current": 2300.0,
637
+ "min": 800.0,
638
+ "max": 3400.0
639
+ },
640
+ {
641
+ "current": 2300.0,
642
+ "min": 800.0,
643
+ "max": 3400.0
644
+ },
645
+ {
646
+ "current": 2300.0,
647
+ "min": 800.0,
648
+ "max": 3400.0
649
+ },
650
+ {
651
+ "current": 2300.0,
652
+ "min": 800.0,
653
+ "max": 3400.0
654
+ },
655
+ {
656
+ "current": 2300.0,
657
+ "min": 800.0,
658
+ "max": 3400.0
659
+ },
660
+ {
661
+ "current": 2300.0,
662
+ "min": 800.0,
663
+ "max": 3400.0
664
+ },
665
+ {
666
+ "current": 2300.0,
667
+ "min": 800.0,
668
+ "max": 3400.0
669
+ },
670
+ {
671
+ "current": 2300.0,
672
+ "min": 800.0,
673
+ "max": 3400.0
674
+ },
675
+ {
676
+ "current": 2300.0,
677
+ "min": 800.0,
678
+ "max": 3400.0
679
+ },
680
+ {
681
+ "current": 2300.0,
682
+ "min": 800.0,
683
+ "max": 3400.0
684
+ },
685
+ {
686
+ "current": 2300.0,
687
+ "min": 800.0,
688
+ "max": 3400.0
689
+ },
690
+ {
691
+ "current": 2300.0,
692
+ "min": 800.0,
693
+ "max": 3400.0
694
+ },
695
+ {
696
+ "current": 2300.0,
697
+ "min": 800.0,
698
+ "max": 3400.0
699
+ },
700
+ {
701
+ "current": 2300.0,
702
+ "min": 800.0,
703
+ "max": 3400.0
704
+ },
705
+ {
706
+ "current": 2300.0,
707
+ "min": 800.0,
708
+ "max": 3400.0
709
+ },
710
+ {
711
+ "current": 2300.0,
712
+ "min": 800.0,
713
+ "max": 3400.0
714
+ },
715
+ {
716
+ "current": 2300.0,
717
+ "min": 800.0,
718
+ "max": 3400.0
719
+ },
720
+ {
721
+ "current": 2300.0,
722
+ "min": 800.0,
723
+ "max": 3400.0
724
+ },
725
+ {
726
+ "current": 2300.0,
727
+ "min": 800.0,
728
+ "max": 3400.0
729
+ },
730
+ {
731
+ "current": 2300.0,
732
+ "min": 800.0,
733
+ "max": 3400.0
734
+ },
735
+ {
736
+ "current": 2300.0,
737
+ "min": 800.0,
738
+ "max": 3400.0
739
+ },
740
+ {
741
+ "current": 2300.0,
742
+ "min": 800.0,
743
+ "max": 3400.0
744
+ },
745
+ {
746
+ "current": 2300.0,
747
+ "min": 800.0,
748
+ "max": 3400.0
749
+ },
750
+ {
751
+ "current": 2300.0,
752
+ "min": 800.0,
753
+ "max": 3400.0
754
+ },
755
+ {
756
+ "current": 2300.0,
757
+ "min": 800.0,
758
+ "max": 3400.0
759
+ },
760
+ {
761
+ "current": 2300.0,
762
+ "min": 800.0,
763
+ "max": 3400.0
764
+ },
765
+ {
766
+ "current": 2300.0,
767
+ "min": 800.0,
768
+ "max": 3400.0
769
+ },
770
+ {
771
+ "current": 2300.0,
772
+ "min": 800.0,
773
+ "max": 3400.0
774
+ },
775
+ {
776
+ "current": 2300.0,
777
+ "min": 800.0,
778
+ "max": 3400.0
779
+ },
780
+ {
781
+ "current": 2300.0,
782
+ "min": 800.0,
783
+ "max": 3400.0
784
+ },
785
+ {
786
+ "current": 2300.0,
787
+ "min": 800.0,
788
+ "max": 3400.0
789
+ },
790
+ {
791
+ "current": 2300.0,
792
+ "min": 800.0,
793
+ "max": 3400.0
794
+ },
795
+ {
796
+ "current": 2300.0,
797
+ "min": 800.0,
798
+ "max": 3400.0
799
+ },
800
+ {
801
+ "current": 2300.0,
802
+ "min": 800.0,
803
+ "max": 3400.0
804
+ },
805
+ {
806
+ "current": 2300.0,
807
+ "min": 800.0,
808
+ "max": 3400.0
809
+ },
810
+ {
811
+ "current": 2300.0,
812
+ "min": 800.0,
813
+ "max": 3400.0
814
+ },
815
+ {
816
+ "current": 2300.0,
817
+ "min": 800.0,
818
+ "max": 3400.0
819
+ },
820
+ {
821
+ "current": 2300.0,
822
+ "min": 800.0,
823
+ "max": 3400.0
824
+ },
825
+ {
826
+ "current": 2300.0,
827
+ "min": 800.0,
828
+ "max": 3400.0
829
+ },
830
+ {
831
+ "current": 2300.0,
832
+ "min": 800.0,
833
+ "max": 3400.0
834
+ },
835
+ {
836
+ "current": 2300.0,
837
+ "min": 800.0,
838
+ "max": 3400.0
839
+ }
840
+ ],
841
+ "disk": {
842
+ "/": {
843
+ "total": 877.6341285705566,
844
+ "used": 214.68004989624023
845
+ }
846
+ },
847
+ "memory": {
848
+ "total": 1007.4379501342773
849
+ }
850
+ }
lm-evaluation-harness/wandb/run-20240608_190333-82mnef5m/files/wandb-summary.json ADDED
@@ -0,0 +1 @@
+ {"winogrande/alias": "winogrande", "sst2/alias": "sst2", "piqa/alias": "piqa", "mrpc/alias": "mrpc", "indic_boolq_hi/alias": "indic_boolq_hi", "indic_arc_easy_hi/alias": "indic_arc_easy_hi", "indic_arc_challenge_hi/alias": "indic_arc_challenge_hi", "copa/alias": "copa", "boolq/alias": "boolq", "arc_easy/alias": "arc_easy", "winogrande/acc": 0.5035516969218626, "winogrande/acc_stderr": 0.014052131146915852, "sst2/acc": 0.518348623853211, "sst2/acc_stderr": 0.016930442150613373, "piqa/acc": 0.5174102285092492, "piqa/acc_stderr": 0.011658749823107691, "piqa/acc_norm": 0.5048966267682263, "piqa/acc_norm_stderr": 0.01166526473007815, "mrpc/acc": 0.3161764705882353, "mrpc/acc_stderr": 0.023048336668420193, "mrpc/f1": 0.0, "mrpc/f1_stderr": 0.0, "indic_boolq_hi/acc": 0.6217125382262997, "indic_boolq_hi/acc_stderr": 0.008482001133930994, "indic_arc_easy_hi/acc": 0.2398989898989899, "indic_arc_easy_hi/acc_stderr": 0.008762298774190573, "indic_arc_challenge_hi/acc": 0.20819112627986347, "indic_arc_challenge_hi/acc_stderr": 0.011864866118448069, "copa/acc": 0.6, "copa/acc_stderr": 0.04923659639173309, "boolq/acc": 0.3782874617737003, "boolq/acc_stderr": 0.008482001133931, "arc_easy/acc": 0.26346801346801346, "arc_easy/acc_stderr": 0.00903915737449772, "arc_easy/acc_norm": 0.2647306397306397, "arc_easy/acc_norm_stderr": 0.009053021086173962, "_timestamp": 1717877751.6233525, "_runtime": 4337.845593452454, "_step": 1, "evaluation/eval_results": {"_type": "table-file", "sha256": "fd1718bec4834f9c91500207621c9a1c18329bb6deafe6d8b87af8596a3311d0", "size": 978, "artifact_path": "wandb-client-artifact://qcv7l9e0l9uu42zeh7opgbfgz6kb3nsranxiaczv74vsmwm4uek7m4gacg5vnuwzk5g5xlannu18aqfgevimbn7tqda8xd89atwa0kg02zhbikb94lyne999sybdomy2/evaluation/eval_results.table.json", "_latest_artifact_path": "wandb-client-artifact://vvw6nvubpnl20tia44h77azzqrz0ijm98oo104cbvibts4dvh49h6xye8bktt35bkah4qi46zq2fq7zm30wqmzvtf4xufl5viwmyug83gfi0uf54vi0kr5mn39cy76tc:latest/evaluation/eval_results.table.json", "path": "media/table/evaluation/eval_results_1_fd1718bec4834f9c9150.table.json", "ncols": 7, "nrows": 13}, "_wandb": {"runtime": 4339}}
lm-evaluation-harness/wandb/run-20240608_190333-82mnef5m/logs/debug-internal.log ADDED
The diff for this file is too large to render. See raw diff
 
lm-evaluation-harness/wandb/run-20240608_190333-82mnef5m/logs/debug.log ADDED
@@ -0,0 +1,36 @@
1
+ 2024-06-08 19:03:33,772 INFO MainThread:30255 [wandb_setup.py:_flush():76] Current SDK version is 0.17.1
2
+ 2024-06-08 19:03:33,772 INFO MainThread:30255 [wandb_setup.py:_flush():76] Configure stats pid to 30255
3
+ 2024-06-08 19:03:33,772 INFO MainThread:30255 [wandb_setup.py:_flush():76] Loading settings from /root/.config/wandb/settings
4
+ 2024-06-08 19:03:33,772 INFO MainThread:30255 [wandb_setup.py:_flush():76] Loading settings from /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/settings
5
+ 2024-06-08 19:03:33,773 INFO MainThread:30255 [wandb_setup.py:_flush():76] Loading settings from environment variables: {}
6
+ 2024-06-08 19:03:33,773 INFO MainThread:30255 [wandb_setup.py:_flush():76] Applying setup settings: {'_disable_service': False}
7
+ 2024-06-08 19:03:33,773 WARNING MainThread:30255 [wandb_setup.py:_flush():76] Could not find program at -m lm_eval.__main__
8
+ 2024-06-08 19:03:33,773 INFO MainThread:30255 [wandb_setup.py:_flush():76] Inferring run settings from compute environment: {'program_relpath': None, 'program': '-m lm_eval.__main__'}
9
+ 2024-06-08 19:03:33,773 INFO MainThread:30255 [wandb_setup.py:_flush():76] Applying login settings: {}
10
+ 2024-06-08 19:03:33,773 INFO MainThread:30255 [wandb_init.py:_log_setup():520] Logging user logs to /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240608_190333-82mnef5m/logs/debug.log
11
+ 2024-06-08 19:03:33,773 INFO MainThread:30255 [wandb_init.py:_log_setup():521] Logging internal logs to /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240608_190333-82mnef5m/logs/debug-internal.log
12
+ 2024-06-08 19:03:33,773 INFO MainThread:30255 [wandb_init.py:init():560] calling init triggers
13
+ 2024-06-08 19:03:33,773 INFO MainThread:30255 [wandb_init.py:init():567] wandb.init called with sweep_config: {}
14
+ config: {}
15
+ 2024-06-08 19:03:33,773 INFO MainThread:30255 [wandb_init.py:init():610] starting backend
16
+ 2024-06-08 19:03:33,773 INFO MainThread:30255 [wandb_init.py:init():614] setting up manager
17
+ 2024-06-08 19:03:33,776 INFO MainThread:30255 [backend.py:_multiprocessing_setup():105] multiprocessing start_methods=fork,spawn,forkserver, using: spawn
18
+ 2024-06-08 19:03:33,777 INFO MainThread:30255 [wandb_init.py:init():622] backend started and connected
19
+ 2024-06-08 19:03:33,781 INFO MainThread:30255 [wandb_init.py:init():711] updated telemetry
20
+ 2024-06-08 19:03:33,790 INFO MainThread:30255 [wandb_init.py:init():744] communicating run to backend with 90.0 second timeout
21
+ 2024-06-08 19:03:33,992 INFO MainThread:30255 [wandb_run.py:_on_init():2402] communicating current version
22
+ 2024-06-08 19:03:34,052 INFO MainThread:30255 [wandb_run.py:_on_init():2411] got version response
23
+ 2024-06-08 19:03:34,052 INFO MainThread:30255 [wandb_init.py:init():795] starting run threads in backend
24
+ 2024-06-08 19:03:34,350 INFO MainThread:30255 [wandb_run.py:_console_start():2380] atexit reg
25
+ 2024-06-08 19:03:34,350 INFO MainThread:30255 [wandb_run.py:_redirect():2235] redirect: wrap_raw
26
+ 2024-06-08 19:03:34,351 INFO MainThread:30255 [wandb_run.py:_redirect():2300] Wrapping output streams.
27
+ 2024-06-08 19:03:34,351 INFO MainThread:30255 [wandb_run.py:_redirect():2325] Redirects installed.
28
+ 2024-06-08 19:03:34,353 INFO MainThread:30255 [wandb_init.py:init():838] run started, returning control to user process
29
+ 2024-06-08 20:15:51,241 INFO MainThread:30255 [wandb_run.py:_config_callback():1382] config_cb None None {'task_configs': {'arc_easy': {'task': 'arc_easy', 'group': ['ai2_arc'], 'dataset_path': 'allenai/ai2_arc', 'dataset_name': 'ARC-Easy', 'training_split': 'train', 'validation_split': 'validation', 'test_split': 'test', 'doc_to_text': 'Question: {{question}}\nAnswer:', 'doc_to_target': '{{choices.label.index(answerKey)}}', 'doc_to_choice': '{{choices.text}}', 'description': '', 'target_delimiter': ' ', 'fewshot_delimiter': '\n\n', 'num_fewshot': 0, 'metric_list': [{'metric': 'acc', 'aggregation': 'mean', 'higher_is_better': True}, {'metric': 'acc_norm', 'aggregation': 'mean', 'higher_is_better': True}], 'output_type': 'multiple_choice', 'repeats': 1, 'should_decontaminate': True, 'doc_to_decontamination_query': 'Question: {{question}}\nAnswer:', 'metadata': {'version': 1.0}}, 'boolq': {'task': 'boolq', 'group': ['super-glue-lm-eval-v1'], 'dataset_path': 'super_glue', 'dataset_name': 'boolq', 'training_split': 'train', 'validation_split': 'validation', 'doc_to_text': '{{passage}}\nQuestion: {{question}}?\nAnswer:', 'doc_to_target': 'label', 'doc_to_choice': ['no', 'yes'], 'description': '', 'target_delimiter': ' ', 'fewshot_delimiter': '\n\n', 'num_fewshot': 0, 'metric_list': [{'metric': 'acc'}], 'output_type': 'multiple_choice', 'repeats': 1, 'should_decontaminate': True, 'doc_to_decontamination_query': 'passage', 'metadata': {'version': 2.0}}, 'copa': {'task': 'copa', 'group': ['super-glue-lm-eval-v1'], 'dataset_path': 'super_glue', 'dataset_name': 'copa', 'training_split': 'train', 'validation_split': 'validation', 'doc_to_text': 'def doc_to_text(doc):\n # Drop the period\n connector = {\n "cause": "because",\n "effect": "therefore",\n }[doc["question"]]\n return doc["premise"].strip()[:-1] + f" {connector}"\n', 'doc_to_target': 'def doc_to_target(doc):\n correct_choice = doc["choice1"] if doc["label"] == 0 else doc["choice2"]\n # Connect the sentences\n return " " + convert_choice(correct_choice)\n', 'doc_to_choice': 'def doc_to_choice(doc):\n return [" " + convert_choice(doc["choice1"]), " " + convert_choice(doc["choice2"])]\n', 'description': '', 'target_delimiter': ' ', 'fewshot_delimiter': '\n\n', 'num_fewshot': 0, 'metric_list': [{'metric': 'acc'}], 'output_type': 'multiple_choice', 'repeats': 1, 'should_decontaminate': False, 'metadata': {'version': 1.0}}, 'indic_arc_challenge_hi': {'task': 'indic_arc_challenge_hi', 'group': 'Cognitive-Lab/Indic-ARC-Challenge', 'dataset_path': 'Cognitive-Lab/Indic-ARC-Challenge', 'dataset_name': 'hi', 'test_split': 'test', 'doc_to_text': 'Question: {{translated_question}}\nAnswer:', 'doc_to_target': '{{translated_choices.label.index(answerKey)}}', 'doc_to_choice': '{{translated_choices.text}}', 'description': '', 'target_delimiter': ' ', 'fewshot_delimiter': '\n\n', 'num_fewshot': 0, 'metric_list': [{'metric': 'acc', 'aggregation': 'mean', 'higher_is_better': True}], 'output_type': 'multiple_choice', 'repeats': 1, 'should_decontaminate': True, 'doc_to_decontamination_query': 'Question: {{translated_question}}\nAnswer:', 'metadata': {'version': 1.0}}, 'indic_arc_easy_hi': {'task': 'indic_arc_easy_hi', 'group': 'Cognitive-Lab/Indic-ARC-Easy', 'dataset_path': 'Cognitive-Lab/Indic-ARC-Easy', 'dataset_name': 'hi', 'test_split': 'test', 'doc_to_text': 'Question: {{translated_question}}\nAnswer:', 'doc_to_target': '{{translated_choices.label.index(answerKey)}}', 'doc_to_choice': '{{translated_choices.text}}', 'description': '', 'target_delimiter': ' 
', 'fewshot_delimiter': '\n\n', 'num_fewshot': 0, 'metric_list': [{'metric': 'acc', 'aggregation': 'mean', 'higher_is_better': True}], 'output_type': 'multiple_choice', 'repeats': 1, 'should_decontaminate': True, 'doc_to_decontamination_query': 'Question: {{translated_question}}\nAnswer:', 'metadata': {'version': 1.0}}, 'indic_boolq_hi': {'task': 'indic_boolq_hi', 'group': 'Cognitive-Lab/Indic-BoolQ', 'dataset_path': 'Cognitive-Lab/Indic-BoolQ', 'dataset_name': 'hi', 'validation_split': 'validation', 'doc_to_text': 'Passage: {translated_passage}\nQuestion: {translated_question.strip()}\nAnswer:', 'doc_to_target': 'answer', 'doc_to_choice': ['true', 'false'], 'description': '', 'target_delimiter': ' ', 'fewshot_delimiter': '\n\n', 'num_fewshot': 0, 'metric_list': [{'metric': 'acc', 'aggregation': 'mean', 'higher_is_better': True}], 'output_type': 'multiple_choice', 'repeats': 1, 'should_decontaminate': False, 'metadata': {'version': 1.0}}, 'mrpc': {'task': 'mrpc', 'group': 'glue', 'dataset_path': 'glue', 'dataset_name': 'mrpc', 'training_split': 'train', 'validation_split': 'validation', 'doc_to_text': 'Sentence 1: {{sentence1}}\nSentence 2: {{sentence2}}\nQuestion: Do both sentences mean the same thing?\nAnswer:', 'doc_to_target': 'label', 'doc_to_choice': ['no', 'yes'], 'description': '', 'target_delimiter': ' ', 'fewshot_delimiter': '\n\n', 'num_fewshot': 0, 'metric_list': [{'metric': 'acc'}, {'metric': 'f1'}], 'output_type': 'multiple_choice', 'repeats': 1, 'should_decontaminate': False, 'metadata': {'version': 1.0}}, 'piqa': {'task': 'piqa', 'dataset_path': 'piqa', 'training_split': 'train', 'validation_split': 'validation', 'doc_to_text': 'Question: {{goal}}\nAnswer:', 'doc_to_target': 'label', 'doc_to_choice': '{{[sol1, sol2]}}', 'description': '', 'target_delimiter': ' ', 'fewshot_delimiter': '\n\n', 'num_fewshot': 0, 'metric_list': [{'metric': 'acc', 'aggregation': 'mean', 'higher_is_better': True}, {'metric': 'acc_norm', 'aggregation': 'mean', 'higher_is_better': True}], 'output_type': 'multiple_choice', 'repeats': 1, 'should_decontaminate': True, 'doc_to_decontamination_query': 'goal', 'metadata': {'version': 1.0}}, 'sst2': {'task': 'sst2', 'group': 'glue', 'dataset_path': 'glue', 'dataset_name': 'sst2', 'training_split': 'train', 'validation_split': 'validation', 'doc_to_text': '{{sentence}}\nQuestion: Is this sentence positive or negative?\nAnswer:', 'doc_to_target': 'label', 'doc_to_choice': ['negative', 'positive'], 'description': '', 'target_delimiter': ' ', 'fewshot_delimiter': '\n\n', 'num_fewshot': 0, 'metric_list': [{'metric': 'acc'}], 'output_type': 'multiple_choice', 'repeats': 1, 'should_decontaminate': False, 'metadata': {'version': 1.0}}, 'winogrande': {'task': 'winogrande', 'dataset_path': 'winogrande', 'dataset_name': 'winogrande_xl', 'training_split': 'train', 'validation_split': 'validation', 'doc_to_text': 'def doc_to_text(doc):\n answer_to_num = {"1": 0, "2": 1}\n return answer_to_num[doc["answer"]]\n', 'doc_to_target': 'def doc_to_target(doc):\n idx = doc["sentence"].index("_") + 1\n return doc["sentence"][idx:].strip()\n', 'doc_to_choice': 'def doc_to_choice(doc):\n idx = doc["sentence"].index("_")\n options = [doc["option1"], doc["option2"]]\n return [doc["sentence"][:idx] + opt for opt in options]\n', 'description': '', 'target_delimiter': ' ', 'fewshot_delimiter': '\n\n', 'num_fewshot': 0, 'metric_list': [{'metric': 'acc', 'aggregation': 'mean', 'higher_is_better': True}], 'output_type': 'multiple_choice', 'repeats': 1, 'should_decontaminate': True, 
'doc_to_decontamination_query': 'sentence', 'metadata': {'version': 1.0}}}, 'cli_configs': {'model': 'hf', 'model_args': 'pretrained=/mnt/weka/peacock/experiments/llama/eval/checkpoint-enhibn-updated/llamav2-3b/hf/global_step240000,tokenizer=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k_hf/ConvertedTokenizer', 'batch_size': 'auto', 'batch_sizes': [64], 'device': None, 'use_cache': None, 'limit': None, 'bootstrap_iters': 100000, 'gen_kwargs': None}}
30
+ 2024-06-08 20:15:51,834 INFO MainThread:30255 [wandb_run.py:_finish():2109] finishing run smlgenai/engl-hi-eval/82mnef5m
31
+ 2024-06-08 20:15:51,834 INFO MainThread:30255 [wandb_run.py:_atexit_cleanup():2349] got exitcode: 0
32
+ 2024-06-08 20:15:51,835 INFO MainThread:30255 [wandb_run.py:_restore():2332] restore
33
+ 2024-06-08 20:15:51,835 INFO MainThread:30255 [wandb_run.py:_restore():2338] restore done
34
+ 2024-06-08 20:15:57,740 INFO MainThread:30255 [wandb_run.py:_footer_history_summary_info():4008] rendering history
35
+ 2024-06-08 20:15:57,741 INFO MainThread:30255 [wandb_run.py:_footer_history_summary_info():4040] rendering summary
36
+ 2024-06-08 20:15:57,748 INFO MainThread:30255 [wandb_run.py:_footer_sync_info():3967] logging synced files
venv/lib/python3.10/site-packages/transformers/models/flava/__init__.py ADDED
@@ -0,0 +1,97 @@
+ # Copyright 2022 Meta Platforms authors and The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ from typing import TYPE_CHECKING
+
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
+
+
+ _import_structure = {
+     "configuration_flava": [
+         "FLAVA_PRETRAINED_CONFIG_ARCHIVE_MAP",
+         "FlavaConfig",
+         "FlavaImageCodebookConfig",
+         "FlavaImageConfig",
+         "FlavaMultimodalConfig",
+         "FlavaTextConfig",
+     ],
+ }
+
+ try:
+     if not is_vision_available():
+         raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+     pass
+ else:
+     _import_structure["feature_extraction_flava"] = ["FlavaFeatureExtractor"]
+     _import_structure["image_processing_flava"] = ["FlavaImageProcessor"]
+     _import_structure["processing_flava"] = ["FlavaProcessor"]
+
+ try:
+     if not is_torch_available():
+         raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+     pass
+ else:
+     _import_structure["modeling_flava"] = [
+         "FLAVA_PRETRAINED_MODEL_ARCHIVE_LIST",
+         "FlavaForPreTraining",
+         "FlavaImageCodebook",
+         "FlavaImageModel",
+         "FlavaModel",
+         "FlavaMultimodalModel",
+         "FlavaPreTrainedModel",
+         "FlavaTextModel",
+     ]
+
+ if TYPE_CHECKING:
+     from .configuration_flava import (
+         FLAVA_PRETRAINED_CONFIG_ARCHIVE_MAP,
+         FlavaConfig,
+         FlavaImageCodebookConfig,
+         FlavaImageConfig,
+         FlavaMultimodalConfig,
+         FlavaTextConfig,
+     )
+
+     try:
+         if not is_vision_available():
+             raise OptionalDependencyNotAvailable()
+     except OptionalDependencyNotAvailable:
+         pass
+     else:
+         from .feature_extraction_flava import FlavaFeatureExtractor
+         from .image_processing_flava import FlavaImageProcessor
+         from .processing_flava import FlavaProcessor
+
+     try:
+         if not is_torch_available():
+             raise OptionalDependencyNotAvailable()
+     except OptionalDependencyNotAvailable:
+         pass
+     else:
+         from .modeling_flava import (
+             FLAVA_PRETRAINED_MODEL_ARCHIVE_LIST,
+             FlavaForPreTraining,
+             FlavaImageCodebook,
+             FlavaImageModel,
+             FlavaModel,
+             FlavaMultimodalModel,
+             FlavaPreTrainedModel,
+             FlavaTextModel,
+         )
+
+ else:
+     import sys
+
+     sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
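This `__init__.py` registers FLAVA's public classes in an `_import_structure` table and hands it to transformers' `_LazyModule`, so the heavy submodules (e.g. `modeling_flava` and its torch dependencies) are only imported when one of the registered names is first resolved. A minimal usage sketch, assuming `transformers` 4.36.x (as pinned earlier in this commit) and `torch` are installed:

```python
# Minimal sketch: `import transformers` alone does not execute modeling_flava;
# the lazy module loads it the moment FlavaConfig / FlavaModel are resolved below.
from transformers import FlavaConfig, FlavaModel

config = FlavaConfig()      # defaults mirror the facebook/flava-full architecture
model = FlavaModel(config)  # randomly initialized weights, no download
print(model.config.image_config.hidden_size)  # 768 by default
```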
venv/lib/python3.10/site-packages/transformers/models/flava/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.51 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/flava/__pycache__/configuration_flava.cpython-310.pyc ADDED
Binary file (25.1 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/flava/__pycache__/convert_dalle_to_flava_codebook.cpython-310.pyc ADDED
Binary file (2.59 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/flava/__pycache__/convert_flava_original_pytorch_to_hf.cpython-310.pyc ADDED
Binary file (3.32 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/flava/__pycache__/feature_extraction_flava.cpython-310.pyc ADDED
Binary file (1.01 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/flava/__pycache__/image_processing_flava.cpython-310.pyc ADDED
Binary file (27.5 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/flava/__pycache__/modeling_flava.cpython-310.pyc ADDED
Binary file (67.1 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/flava/__pycache__/processing_flava.cpython-310.pyc ADDED
Binary file (5.29 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/flava/configuration_flava.py ADDED
@@ -0,0 +1,764 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 Meta Platforms authors and The HuggingFace Team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ FLAVA model configurations"""
16
+
17
+ import os
18
+ from typing import Any, Dict, Union
19
+
20
+ from ...configuration_utils import PretrainedConfig
21
+ from ...utils import logging
22
+
23
+
24
+ logger = logging.get_logger(__name__)
25
+
26
+
27
+ from ..deprecated._archive_maps import FLAVA_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
28
+
29
+
30
+ class FlavaImageConfig(PretrainedConfig):
31
+ r"""
32
+ This is the configuration class to store the configuration of a [`FlavaImageModel`]. It is used to instantiate an
33
+ FLAVA model according to the specified arguments, defining the model architecture.
34
+
35
+ Instantiating a configuration with the defaults will yield a similar configuration to that of the FLAVA
36
+ [facebook/flava-full](https://huggingface.co/facebook/flava-full) architecture.
37
+
38
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
39
+ documentation from [`PretrainedConfig`] for more information.
40
+
41
+
42
+ Args:
43
+ hidden_size (`int`, *optional*, defaults to 768):
44
+ Dimensionality of the encoder layers and the pooler layer.
45
+ num_hidden_layers (`int`, *optional*, defaults to 12):
46
+ Number of hidden layers in the Transformer encoder.
47
+ num_attention_heads (`int`, *optional*, defaults to 12):
48
+ Number of attention heads for each attention layer in the Transformer encoder.
49
+ intermediate_size (`int`, *optional*, defaults to 3072):
50
+ Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
51
+ hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
52
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
53
+ `"relu"`, `"selu"` and `"gelu_new"` are supported.
54
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
55
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
56
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
57
+ The dropout ratio for the attention probabilities.
58
+ initializer_range (`float`, *optional*, defaults to 0.02):
59
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
60
+ layer_norm_eps (`float`, *optional*, defaults to 1e-12):
61
+ The epsilon used by the layer normalization layers.
62
+ image_size (`int`, *optional*, defaults to 224):
63
+ The size (resolution) of each image.
64
+ patch_size (`int`, *optional*, defaults to 16):
65
+ The size (resolution) of each patch.
66
+ num_channels (`int`, *optional*, defaults to 3):
67
+ The number of input channels.
68
+ qkv_bias (`bool`, *optional*, defaults to `True`):
69
+ Whether to add a bias to the queries, keys and values.
70
+ mask_token (`bool`, *optional*, defaults to `True`):
71
+ Whether to use a mask token or not. Used in MIM (Masked Image Modeling) loss for FLAVA.
72
+ vocab_size (`int`, *optional*, defaults to 8192):
73
+ Vocabulary size of the [`FlavaImageCodebook`] used in conjunction with [`FlavaImageModel`] for MIM (Masked
74
+ Image Modeling) loss for FLAVA.
75
+
76
+ Example:
77
+
78
+ ```python
79
+ >>> from transformers import FlavaImageConfig, FlavaImageModel
80
+
81
+ >>> # Initializing a FlavaImageModel with style configuration
82
+ >>> configuration = FlavaImageConfig()
83
+
84
+ >>> # Initializing a FlavaImageModel model (with random weights) from the style configuration
85
+ >>> model = FlavaImageModel(configuration)
86
+
87
+ >>> # Accessing the model configuration
88
+ >>> configuration = model.config
89
+ ```"""
90
+
91
+ model_type = "flava_image_model"
92
+
93
+ def __init__(
94
+ self,
95
+ hidden_size: int = 768,
96
+ num_hidden_layers: int = 12,
97
+ num_attention_heads: int = 12,
98
+ intermediate_size: int = 3072,
99
+ hidden_act: int = "gelu",
100
+ hidden_dropout_prob: float = 0.0,
101
+ attention_probs_dropout_prob: float = 0.0,
102
+ initializer_range: float = 0.02,
103
+ layer_norm_eps: float = 1e-12,
104
+ image_size: int = 224,
105
+ patch_size: int = 16,
106
+ num_channels: int = 3,
107
+ qkv_bias: bool = True,
108
+ mask_token: bool = True,
109
+ vocab_size: int = 8192,
110
+ **kwargs,
111
+ ):
112
+ super().__init__(**kwargs)
113
+
114
+ self.hidden_size = hidden_size
115
+ self.num_hidden_layers = num_hidden_layers
116
+ self.num_attention_heads = num_attention_heads
117
+ self.intermediate_size = intermediate_size
118
+ self.hidden_act = hidden_act
119
+ self.hidden_dropout_prob = hidden_dropout_prob
120
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
121
+ self.initializer_range = initializer_range
122
+ self.layer_norm_eps = layer_norm_eps
123
+ self.image_size = image_size
124
+ self.patch_size = patch_size
125
+ self.num_channels = num_channels
126
+ self.qkv_bias = qkv_bias
127
+ self.mask_token = mask_token
128
+ self.vocab_size = vocab_size
129
+
130
+ @classmethod
131
+ def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
132
+ cls._set_token_in_kwargs(kwargs)
133
+
134
+ config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
135
+
136
+ # get the image config dict if we are loading from FlavaConfig
137
+ if config_dict.get("model_type") == "flava":
138
+ config_dict = config_dict["image_config"]
139
+
140
+ if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
141
+ logger.warning(
142
+ f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
143
+ f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
144
+ )
145
+
146
+ return cls.from_dict(config_dict, **kwargs)
147
+
148
+
149
+ class FlavaTextConfig(PretrainedConfig):
150
+ r"""
151
+ This is the configuration class to store the configuration of a [`FlavaTextModel`]. It is used to instantiate an
152
+ FLAVA model according to the specified arguments, defining the model architecture.
153
+
154
+ Instantiating a configuration with the defaults will yield a similar configuration to that of the FLAVA
155
+ [facebook/flava-full](https://huggingface.co/facebook/flava-full) architecture.
156
+
157
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
158
+ documentation from [`PretrainedConfig`] for more information.
159
+
160
+
161
+ Args:
162
+ vocab_size (`int`, *optional*, defaults to 30522):
163
+ Vocabulary size of the BERT model. Defines the number of different tokens that can be represented by the
164
+ `inputs_ids` passed when calling [`FlavaTextModel`].
165
+ type_vocab_size (`int`, *optional*, defaults to 2):
166
+ The vocabulary size of the `token_type_ids` passed when calling [`FlavaTextModel`]. Note that even though
167
+ text encoder allows `token_type_ids`'s value as 2, for text-only pretraining and fine-tuning, only 1 is
168
+ used similar to RoBERTa.
169
+ max_position_embeddings (`int`, *optional*, defaults to 512):
170
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
171
+ just in case (e.g., 512 or 1024 or 2048). For VL, max_length passed to model is 77.
172
+ position_embedding_type (`str`, *optional*, defaults to `"absolute"`):
173
+ Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For
174
+ positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to
175
+ [Self-Attention with Relative Position Representations (Shaw et al.)](https://arxiv.org/abs/1803.02155).
176
+ For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models
177
+ with Better Relative Position Embeddings (Huang et al.)](https://arxiv.org/abs/2009.13658).
178
+ hidden_size (`int`, *optional*, defaults to 768):
179
+ Dimensionality of the encoder layers and the pooler layer.
180
+ num_hidden_layers (`int`, *optional*, defaults to 12):
181
+ Number of hidden layers in the Transformer encoder.
182
+ num_attention_heads (`int`, *optional*, defaults to 12):
183
+ Number of attention heads for each attention layer in the Transformer encoder.
184
+ intermediate_size (`int`, *optional*, defaults to 3072):
185
+ Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
186
+ hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
187
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
188
+ `"relu"`, `"selu"` and `"gelu_new"` are supported.
189
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
190
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
191
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
192
+ The dropout ratio for the attention probabilities.
193
+ initializer_range (`float`, *optional*, defaults to 0.02):
194
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
195
+ layer_norm_eps (`float`, *optional*, defaults to 1e-12):
196
+ The epsilon used by the layer normalization layers.
197
+ image_size (`int`, *optional*, defaults to 224):
198
+ The size (resolution) of each image.
199
+ patch_size (`int`, *optional*, defaults to 16):
200
+ The size (resolution) of each patch.
201
+ num_channels (`int`, *optional*, defaults to 3):
202
+ The number of input channels.
203
+ qkv_bias (`bool`, *optional*, defaults to `True`):
204
+ Whether to add a bias to the queries, keys and values.
205
+
206
+ Example:
207
+
208
+ ```python
209
+ >>> from transformers import FlavaTextConfig, FlavaTextModel
210
+
211
+ >>> # Initializing a FlavaTextModel with style configuration
212
+ >>> configuration = FlavaTextConfig()
213
+
214
+ >>> # Initializing a FlavaTextModel model (with random weights) from the style configuration
215
+ >>> model = FlavaTextModel(configuration)
216
+
217
+ >>> # Accessing the model configuration
218
+ >>> configuration = model.config
219
+ ```"""
220
+
221
+ model_type = "flava_text_model"
222
+
223
+ def __init__(
224
+ self,
225
+ vocab_size: int = 30522,
226
+ type_vocab_size: int = 2,
227
+ max_position_embeddings: int = 512,
228
+ position_embedding_type: str = "absolute",
229
+ hidden_size: int = 768,
230
+ num_hidden_layers: int = 12,
231
+ num_attention_heads: int = 12,
232
+ intermediate_size: int = 3072,
233
+ hidden_act: str = "gelu",
234
+ hidden_dropout_prob: float = 0.0,
235
+ attention_probs_dropout_prob: float = 0.0,
236
+ initializer_range: float = 0.02,
237
+ layer_norm_eps: float = 1e-12,
238
+ pad_token_id: int = 0,
239
+ qkv_bias: bool = True,
240
+ **kwargs,
241
+ ):
242
+ super().__init__(**kwargs)
243
+
244
+ self.vocab_size = vocab_size
245
+ self.type_vocab_size = type_vocab_size
246
+ self.max_position_embeddings = max_position_embeddings
247
+ self.position_embedding_type = position_embedding_type
248
+ self.hidden_size = hidden_size
249
+ self.num_hidden_layers = num_hidden_layers
250
+ self.num_attention_heads = num_attention_heads
251
+ self.intermediate_size = intermediate_size
252
+ self.hidden_act = hidden_act
253
+ self.hidden_dropout_prob = hidden_dropout_prob
254
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
255
+ self.initializer_range = initializer_range
256
+ self.layer_norm_eps = layer_norm_eps
257
+ self.qkv_bias = qkv_bias
258
+ self.pad_token_id = pad_token_id
259
+
260
+ @classmethod
261
+ def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
262
+ cls._set_token_in_kwargs(kwargs)
263
+
264
+ config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
265
+
266
+ # get the text config dict if we are loading from FlavaConfig
267
+ if config_dict.get("model_type") == "flava":
268
+ config_dict = config_dict["text_config"]
269
+
270
+ if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
271
+ logger.warning(
272
+ f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
273
+ f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
274
+ )
275
+
276
+ return cls.from_dict(config_dict, **kwargs)
277
+
278
+
279
+ class FlavaMultimodalConfig(PretrainedConfig):
280
+ r"""
281
+ This is the configuration class to store the configuration of a [`FlavaMultimodalModel`]. It is used to instantiate
282
+ an FLAVA model according to the specified arguments, defining the model architecture.
283
+
284
+ Instantiating a configuration with the defaults will yield a similar configuration to that of the FLAVA
285
+ [facebook/flava-full](https://huggingface.co/facebook/flava-full) architecture.
286
+
287
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
288
+ documentation from [`PretrainedConfig`] for more information.
289
+
290
+
291
+ Args:
292
+ hidden_size (`int`, *optional*, defaults to 768):
293
+ Dimensionality of the encoder layers and the pooler layer.
294
+ num_hidden_layers (`int`, *optional*, defaults to 6):
295
+ Number of hidden layers in the Transformer encoder.
296
+ num_attention_heads (`int`, *optional*, defaults to 12):
297
+ Number of attention heads for each attention layer in the Transformer encoder.
298
+ intermediate_size (`int`, *optional*, defaults to 3072):
299
+ Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
300
+ hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
301
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
302
+ `"relu"`, `"selu"` and `"gelu_new"` are supported.
303
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
304
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
305
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
306
+ The dropout ratio for the attention probabilities.
307
+ initializer_range (`float`, *optional*, defaults to 0.02):
308
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
309
+ layer_norm_eps (`float`, *optional*, defaults to 1e-12):
310
+ The epsilon used by the layer normalization layers.
311
+ qkv_bias (`bool`, *optional*, defaults to `True`):
312
+ Whether to add a bias to the queries, keys and values.
313
+ use_cls_token (`bool`, *optional*, defaults to `True`):
314
+ Whether to use an extra CLS token for multimodal settings. Usually needed by the FLAVA model.
315
+
316
+
317
+ Example:
318
+
319
+ ```python
320
+ >>> from transformers import FlavaMultimodalConfig, FlavaMultimodalModel
321
+
322
+ >>> # Initializing a FlavaMultimodalConfig with facebook/flava-full style configuration
323
+ >>> configuration = FlavaMultimodalConfig()
324
+
325
+ >>> # Initializing a FlavaMultimodalModel model (with random weights) from the style configuration
326
+ >>> model = FlavaMultimodalModel(configuration)
327
+
328
+ >>> # Accessing the model configuration
329
+ >>> configuration = model.config
330
+ ```"""
331
+
332
+ model_type = "flava_multimodal_model"
333
+
334
+ def __init__(
335
+ self,
336
+ hidden_size: int = 768,
337
+ num_hidden_layers: int = 6,
338
+ num_attention_heads: int = 12,
339
+ intermediate_size: int = 3072,
340
+ hidden_act: str = "gelu",
341
+ hidden_dropout_prob: float = 0.0,
342
+ attention_probs_dropout_prob: float = 0.0,
343
+ initializer_range: float = 0.02,
344
+ layer_norm_eps: float = 1e-12,
345
+ qkv_bias: bool = True,
346
+ use_cls_token: bool = True,
347
+ **kwargs,
348
+ ):
349
+ super().__init__(**kwargs)
350
+
351
+ self.hidden_size = hidden_size
352
+ self.num_hidden_layers = num_hidden_layers
353
+ self.num_attention_heads = num_attention_heads
354
+ self.intermediate_size = intermediate_size
355
+ self.hidden_act = hidden_act
356
+ self.hidden_dropout_prob = hidden_dropout_prob
357
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
358
+ self.initializer_range = initializer_range
359
+ self.layer_norm_eps = layer_norm_eps
360
+ self.qkv_bias = qkv_bias
361
+ self.use_cls_token = use_cls_token
362
+
363
+ @classmethod
364
+ def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
365
+ cls._set_token_in_kwargs(kwargs)
366
+
367
+ config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
368
+
369
+ # get the multimodal config dict if we are loading from FlavaConfig
370
+ if config_dict.get("model_type") == "flava":
371
+ config_dict = config_dict["multimodal_config"]
372
+
373
+ if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
374
+ logger.warning(
375
+ f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
376
+ f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
377
+ )
378
+
379
+ return cls.from_dict(config_dict, **kwargs)
380
+
381
+
382
+ class FlavaImageCodebookConfig(PretrainedConfig):
383
+ model_type = "flava_image_codebook"
384
+
385
+ r"""
386
+ [`FlavaImageCodebookConfig`] is the configuration class to store the configuration of a [`FlavaImageCodebook`]. It
387
+ is used to instantiate a FLAVA model according to the specified arguments, defining the model architecture.
388
+ Instantiating a configuration with the defaults will yield a similar configuration to that of the FLAVA
389
+ [facebook/flava-image-codebook](https://huggingface.co/facebook/flava-image-codebook) architecture.
390
+
391
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
392
+ documentation from [`PretrainedConfig`] for more information.
393
+
394
+ Args:
395
+ num_groups (`int`, defaults to 4):
396
+ Number of groups to be created. This parameter as of now doesn't affect the model and is used for some
397
+ internal calculation and estimations.
398
+ input_channels (`int`, defaults to 3):
399
+ Number of channels in the image to be passed.
400
+ num_blocks_per_group (`int`, defaults to 2):
401
+ Number of conv-based blocks per group.
402
+ hidden_size (`int`, defaults to 256):
403
+ Size of hidden dim for the blocks.
404
+ vocab_size (`int`, defaults to 8192):
405
+ Size of the output vocabulary for the codebook.
406
+ freeze (`bool`, defaults to `True`):
407
+ Whether to freeze the weights of the model.
408
+ initializer_range (`float`, *optional*, defaults to 0.02):
409
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
410
+ kwargs (*optional*):
411
+ Dictionary of keyword arguments.
412
+
413
+ Example:
414
+
415
+ ```python
416
+ >>> from transformers import FlavaImageCodebookConfig, FlavaImageCodebook
417
+
418
+ >>> # Initializing a FlavaImageCodebookConfig with facebook/flava-image-codebook style configuration
419
+ >>> configuration = FlavaImageCodebookConfig()
420
+
421
+ >>> # Initializing a FlavaImageCodebook model (with random weights) from the style configuration
422
+ >>> model = FlavaImageCodebook(configuration)
423
+ >>> # Accessing the model configuration
424
+ >>> configuration = model.config
425
+ ```
426
+ """
427
+
428
+ def __init__(
429
+ self,
430
+ num_groups: int = 4,
431
+ input_channels: int = 3,
432
+ num_blocks_per_group: int = 2,
433
+ hidden_size: int = 256,
434
+ vocab_size: int = 8192,
435
+ freeze: bool = True,
436
+ initializer_range: float = 0.02,
437
+ **kwargs,
438
+ ):
439
+ super().__init__(**kwargs)
440
+ self.num_groups = num_groups
441
+ self.input_channels = input_channels
442
+ self.num_blocks_per_group = num_blocks_per_group
443
+ self.hidden_size = hidden_size
444
+ self.vocab_size = vocab_size
445
+ self.freeze = freeze
446
+ self.initializer_range = initializer_range
447
+
448
+ @classmethod
449
+ def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
450
+ cls._set_token_in_kwargs(kwargs)
451
+
452
+ config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
453
+
454
+ # get the image codebook config dict if we are loading from FlavaConfig
455
+ if config_dict.get("model_type") == "flava":
456
+ config_dict = config_dict["image_codebook_config"]
457
+
458
+ if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
459
+ logger.warning(
460
+ f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
461
+ f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
462
+ )
463
+
464
+ return cls.from_dict(config_dict, **kwargs)
465
+
466
+
467
+ class FlavaConfig(PretrainedConfig):
468
+ r"""
469
+ [`FlavaConfig`] is the configuration class to store the configuration of a [`FlavaModel`]. It is used to
470
+ instantiate a FLAVA model according to the specified arguments, defining the text model, image model, image codebook
471
+ and multimodal model configs. Instantiating a configuration with the defaults will yield a similar configuration to
472
+ that of the FLAVA [facebook/flava-full](https://huggingface.co/facebook/flava-full) architecture.
473
+
474
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
475
+ documentation from [`PretrainedConfig`] for more information.
476
+
477
+ Args:
478
+ text_config (`dict`, *optional*):
479
+ Dictionary of configuration options used to initialize [`FlavaTextConfig`].
480
+ image_config (`dict`, *optional*):
481
+ Dictionary of configuration options used to initialize [`FlavaImageConfig`].
482
+ multimodal_config (`dict`, *optional*):
483
+ Dictionary of configuration options used to initialize [`FlavaMultimodalConfig`].
+ image_codebook_config (`dict`, *optional*):
+ Dictionary of configuration options used to initialize [`FlavaImageCodebookConfig`].
484
+ hidden_size (`int`, *optional*, defaults to 768):
485
+ Dimensionality of the encoder layers and the pooler layer.
486
+ layer_norm_eps (`float`, *optional*, defaults to 1e-12):
487
+ The epsilon used by the layer normalization layers.
488
+ projection_dim (`int`, *optional*, defaults to 768):
489
+ Dimensionality of the text and image projection layers.
490
+ logit_scale_init_value (`float`, *optional*, defaults to 2.6592):
491
+ The initial value of the *logit_scale* parameter. Default is used as per the original FLAVA/CLIP
492
+ implementation.
493
+ initializer_range (`float`, *optional*, defaults to 0.02):
494
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
495
+ ce_ignore_index (`int`, *optional*, defaults to -100):
496
+ Cross entropy index to ignore.
497
+ mim_weight (`float`, *optional*, defaults to 1.0):
498
+ Weight to be assigned to MIM (Masked Image Modeling) unimodal loss
499
+ mlm_weight (`float`, *optional*, defaults to 1.0):
500
+ Weight to be assigned to MLM (Masked Language Modeling) unimodal loss
501
+ global_contrastive_weight (`float`, *optional*, defaults to 1.0):
502
+ Weight to be assigned to global contrastive cross-alignment loss.
503
+ itm_weight (`float`, *optional*, defaults to 1.0):
504
+ Weight to be assigned to image-text matching multimodal loss.
505
+ mmm_image_weight (`float`, *optional*, defaults to 1.0):
506
+ Weight to be assigned to MMM loss's image part.
507
+ mmm_text_weight (`float`, *optional*, defaults to 1.0):
508
+ Weight to be assigned to MMM loss's text part.
509
+ global_backprop_contrastive (`bool`, *optional*, defaults to `True`):
510
+ Whether to use global backpropagation through all workers in the contrastive loss.
511
+ skip_unmasked_multimodal_encoder (`bool`, *optional*, defaults to `True`):
512
+ Whether to skip running unmasked multimodal encoder whose outputs are not used by FLAVA losses.
513
+ return_loss (`bool`, *optional*, defaults to `True`):
514
+ Whether to return loss or not
515
+
516
+ kwargs (*optional*):
517
+ Dictionary of keyword arguments.
518
+
519
+ Example:
520
+
521
+ ```python
522
+ >>> from transformers import FlavaConfig, FlavaModel, FlavaForPreTraining
523
+
524
+ >>> # Initializing a FlavaConfig with facebook/flava-full style configuration
525
+ >>> configuration = FlavaConfig()
526
+
527
+ >>> # Initializing a FlavaModel and FlavaForPreTraining model (with random weights) from the style configuration
528
+ >>> model = FlavaModel(configuration)
529
+ >>> model_pre = FlavaForPreTraining(configuration)
530
+
531
+ >>> # Accessing the model configuration
532
+ >>> configuration = model.config
533
+ >>> configuration_pre = model_pre.config
534
+ ```
535
+ """
536
+
537
+ model_type = "flava"
538
+
539
+ def __init__(
540
+ self,
541
+ image_config: Dict[str, Any] = None,
542
+ text_config: Dict[str, Any] = None,
543
+ multimodal_config: Dict[str, Any] = None,
544
+ image_codebook_config: Dict[str, Any] = None,
545
+ hidden_size: int = 768,
546
+ layer_norm_eps: float = 1e-12,
547
+ projection_dim: int = 768,
548
+ init_codebook: bool = True,
549
+ logit_scale_init_value: float = 2.6592,
550
+ initializer_range: float = 0.02,
551
+ ce_ignore_index: int = -100,
552
+ mim_weight: float = 1.0,
553
+ mlm_weight: float = 1.0,
554
+ global_contrastive_weight: float = 1.0,
555
+ itm_weight: float = 1.0,
556
+ mmm_image_weight: float = 1.0,
557
+ mmm_text_weight: float = 1.0,
558
+ global_backprop_contrastive: bool = True,
559
+ skip_unmasked_multimodal_encoder: bool = True,
560
+ return_loss: bool = True,
561
+ **kwargs,
562
+ ):
563
+ # If the `*_config_dict` kwargs exist, we use them for backward compatibility.
564
+ # We pop out these 4 attributes before calling `super().__init__` to avoid them being saved (which causes a lot
565
+ # of confusion!).
566
+ text_config_dict = kwargs.pop("text_config_dict", None)
567
+ image_config_dict = kwargs.pop("image_config_dict", None)
568
+ multimodal_config_dict = kwargs.pop("multimodal_config_dict", None)
569
+ image_codebook_config_dict = kwargs.pop("image_codebook_config_dict", None)
570
+
571
+ super().__init__(**kwargs)
572
+
573
+ # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
574
+ # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
575
+ # cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
576
+ if text_config_dict is not None:
577
+ if text_config is None:
578
+ text_config = {}
579
+
580
+ # This is the complete result when using `text_config_dict`.
581
+ _text_config_dict = FlavaTextConfig(**text_config_dict).to_dict()
582
+
583
+ # Give a warning if the values exist in both `_text_config_dict` and `text_config` but are different.
584
+ for key, value in _text_config_dict.items():
585
+ if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
586
+ # If specified in `text_config_dict`
587
+ if key in text_config_dict:
588
+ message = (
589
+ f"`{key}` is found in both `text_config_dict` and `text_config` but with different values. "
590
+ f'The value `text_config_dict["{key}"]` will be used instead.'
591
+ )
592
+ # If inferred from default argument values (just to be super careful)
593
+ else:
594
+ message = (
595
+ f"`text_config_dict` is provided which will be used to initialize `FlavaTextConfig`. The "
596
+ f'value `text_config["{key}"]` will be overridden.'
597
+ )
598
+ logger.info(message)
599
+
600
+ # Update all values in `text_config` with the ones in `_text_config_dict`.
601
+ text_config.update(_text_config_dict)
602
+
603
+ if image_config_dict is not None:
604
+ if image_config is None:
605
+ image_config = {}
606
+
607
+ # This is the complete result when using `image_config_dict`.
608
+ _image_config_dict = FlavaImageConfig(**image_config_dict).to_dict()
609
+ # convert keys to string instead of integer
610
+ if "id2label" in _image_config_dict:
611
+ _image_config_dict["id2label"] = {
612
+ str(key): value for key, value in _image_config_dict["id2label"].items()
613
+ }
614
+
615
+ # Give a warning if the values exist in both `_image_config_dict` and `image_config` but are different.
616
+ for key, value in _image_config_dict.items():
617
+ if key in image_config and value != image_config[key] and key not in ["transformers_version"]:
618
+ # If specified in `image_config_dict`
619
+ if key in image_config_dict:
620
+ message = (
621
+ f"`{key}` is found in both `image_config_dict` and `image_config` but with different "
622
+ f'values. The value `image_config_dict["{key}"]` will be used instead.'
623
+ )
624
+ # If inferred from default argument values (just to be super careful)
625
+ else:
626
+ message = (
627
+ f"`image_config_dict` is provided which will be used to initialize `FlavaImageConfig`. "
628
+ f'The value `image_config["{key}"]` will be overridden.'
629
+ )
630
+ logger.info(message)
631
+
632
+ # Update all values in `image_config` with the ones in `_image_config_dict`.
633
+ image_config.update(_image_config_dict)
634
+
635
+ if multimodal_config_dict is not None:
636
+ if multimodal_config is None:
637
+ multimodal_config = {}
638
+
639
+ # This is the complete result when using `multimodal_config_dict`.
640
+ _multimodal_config_dict = FlavaMultimodalConfig(**multimodal_config_dict).to_dict()
641
+
642
+ # Give a warning if the values exist in both `_multimodal_config_dict` and `multimodal_config` but are
643
+ # different.
644
+ for key, value in _multimodal_config_dict.items():
645
+ if (
646
+ key in multimodal_config
647
+ and value != multimodal_config[key]
648
+ and key not in ["transformers_version"]
649
+ ):
650
+ # If specified in `multimodal_config_dict`
651
+ if key in multimodal_config_dict:
652
+ message = (
653
+ f"`{key}` is found in both `multimodal_config_dict` and `multimodal_config` but with "
654
+ f'different values. The value `multimodal_config_dict["{key}"]` will be used instead.'
655
+ )
656
+ # If inferred from default argument values (just to be super careful)
657
+ else:
658
+ message = (
659
+ f"`multimodal_config_dict` is provided which will be used to initialize "
660
+ f'`FlavaMultimodalConfig`. The value `multimodal_config["{key}"]` will be overridden.'
661
+ )
662
+ logger.info(message)
663
+
664
+ # Update all values in `multimodal_config` with the ones in `_multimodal_config_dict`.
665
+ multimodal_config.update(_multimodal_config_dict)
666
+
667
+ if image_codebook_config_dict is not None:
668
+ if image_codebook_config is None:
669
+ image_codebook_config = {}
670
+
671
+ # This is the complete result when using `image_codebook_config_dict`.
672
+ _image_codebook_config_dict = FlavaImageCodebookConfig(**image_codebook_config_dict).to_dict()
673
+
674
+ # Give a warning if the values exist in both `_image_codebook_config_dict` and `image_codebook_config` but
675
+ # are different.
676
+ for key, value in _image_codebook_config_dict.items():
677
+ if (
678
+ key in image_codebook_config
679
+ and value != image_codebook_config[key]
680
+ and key not in ["transformers_version"]
681
+ ):
682
+ # If specified in `image_codebook_config_dict`
683
+ if key in image_codebook_config_dict:
684
+ message = (
685
+ f"`{key}` is found in both `image_codebook_config_dict` and `image_codebook_config` but "
686
+ f'with different values. The value `image_codebook_config_dict["{key}"]` will be used '
687
+ "instead."
688
+ )
689
+ # If inferred from default argument values (just to be super careful)
690
+ else:
691
+ message = (
692
+ f"`image_codebook_config_dict` is provided which will be used to initialize "
693
+ f'`FlavaImageCodebookConfig`. The value `image_codebook_config["{key}"]` will be overridden.'
694
+ )
695
+ logger.info(message)
696
+
697
+ # Update all values in `image_codebook_config` with the ones in `_image_codebook_config_dict`.
698
+ image_codebook_config.update(_image_codebook_config_dict)
699
+
700
+ if image_config is None:
701
+ image_config = {}
702
+ logger.info("`image_config` is `None`. initializing the `FlavaImageConfig` with default values.")
703
+
704
+ if text_config is None:
705
+ text_config = {}
706
+ logger.info("`text_config` is `None`. Initializing the `FlavaTextConfig` with default values.")
707
+
708
+ if multimodal_config is None:
709
+ multimodal_config = {}
710
+ logger.info("`multimodal_config` is `None`. initializing the `FlavaMultimodalConfig` with default values.")
711
+
712
+ if image_codebook_config is None:
713
+ image_codebook_config = {}
714
+ logger.info(
715
+ "`image_codebook_config` is `None`. initializing the `FlavaImageCodebookConfig` with default values."
716
+ )
717
+
718
+ self.image_config = FlavaImageConfig(**image_config)
719
+ self.text_config = FlavaTextConfig(**text_config)
720
+ self.multimodal_config = FlavaMultimodalConfig(**multimodal_config)
721
+ self.image_codebook_config = FlavaImageCodebookConfig(**image_codebook_config)
722
+ self.projection_dim = projection_dim
723
+ self.init_codebook = init_codebook
724
+
725
+ self.hidden_size = hidden_size
726
+ self.layer_norm_eps = layer_norm_eps
727
+ self.initializer_range = initializer_range
728
+ self.logit_scale_init_value = logit_scale_init_value
729
+ self.initializer_factor = 1.0
730
+ self.ce_ignore_index = ce_ignore_index
731
+ self.mim_weight = mim_weight
732
+ self.mlm_weight = mlm_weight
733
+ self.global_contrastive_weight = global_contrastive_weight
734
+ self.itm_weight = itm_weight
735
+ self.mmm_image_weight = mmm_image_weight
736
+ self.mmm_text_weight = mmm_text_weight
737
+ self.global_backprop_contrastive = global_backprop_contrastive
738
+ self.skip_unmasked_multimodal_encoder = skip_unmasked_multimodal_encoder
739
+ self.return_loss = return_loss
740
+
741
+ @classmethod
742
+ def from_configs(
743
+ cls,
744
+ image_config: FlavaImageConfig,
745
+ text_config: FlavaTextConfig,
746
+ multimodal_config: FlavaMultimodalConfig,
747
+ image_codebook_config: FlavaImageCodebookConfig,
748
+ **kwargs,
749
+ ):
750
+ r"""
751
+ Instantiate a [`FlavaConfig`] (or a derived class) from flava text model configuration, flava image model
752
+ configuration, flava multimodal model and flava codebook model configuration.
753
+
754
+ Returns:
755
+ [`FlavaConfig`]: An instance of a configuration object
756
+ """
757
+
758
+ return cls(
759
+ image_config=image_config.to_dict(),
760
+ text_config=text_config.to_dict(),
761
+ multimodal_config=multimodal_config.to_dict(),
762
+ image_codebook_config=image_codebook_config.to_dict(),
763
+ **kwargs,
764
+ )
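The `from_configs` classmethod above composes a full `FlavaConfig` out of the four sub-configurations. A minimal usage sketch with the default sub-configurations (illustrative only):

```python
from transformers import (
    FlavaConfig,
    FlavaImageCodebookConfig,
    FlavaImageConfig,
    FlavaMultimodalConfig,
    FlavaTextConfig,
)

# Build each sub-configuration explicitly, then combine them into a single FlavaConfig.
config = FlavaConfig.from_configs(
    image_config=FlavaImageConfig(),
    text_config=FlavaTextConfig(),
    multimodal_config=FlavaMultimodalConfig(),
    image_codebook_config=FlavaImageCodebookConfig(),
)

# The dicts are re-instantiated as config objects on the resulting FlavaConfig.
print(type(config.text_config).__name__)  # FlavaTextConfig
print(config.projection_dim)              # 768 (FlavaConfig default)
```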
venv/lib/python3.10/site-packages/transformers/models/flava/convert_dalle_to_flava_codebook.py ADDED
@@ -0,0 +1,102 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 Meta Platforms authors and The HuggingFace Team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ import argparse
17
+ import os
18
+
19
+ import torch
20
+
21
+ from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
22
+
23
+
24
+ def rreplace(s, old, new, occurrence):
25
+ li = s.rsplit(old, occurrence)
26
+ return new.join(li)
27
+
28
+
29
+ def count_parameters(state_dict):
30
+ # encoder.embeddings are double copied in original FLAVA
31
+ return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())
32
+
33
+
34
+ def upgrade_state_dict(state_dict):
35
+ upgrade = {}
36
+
37
+ group_keys = ["group_1", "group_2", "group_3", "group_4"]
38
+ for key, value in state_dict.items():
39
+ for group_key in group_keys:
40
+ if group_key in key:
41
+ key = key.replace(f"{group_key}.", f"{group_key}.group.")
42
+
43
+ if "res_path" in key:
44
+ key = key.replace("res_path.", "res_path.path.")
45
+
46
+ if key.endswith(".w"):
47
+ key = rreplace(key, ".w", ".weight", 1)
48
+ if key.endswith(".b"):
49
+ key = rreplace(key, ".b", ".bias", 1)
50
+
51
+ upgrade[key] = value.float()
52
+
53
+ return upgrade
54
+
55
+
56
+ @torch.no_grad()
57
+ def convert_dalle_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, save_checkpoint=True):
58
+ """
59
+ Copy/paste/tweak model's weights to transformers design.
60
+ """
61
+ from dall_e import Encoder
62
+
63
+ encoder = Encoder()
64
+ if os.path.exists(checkpoint_path):
65
+ ckpt = torch.load(checkpoint_path)
66
+ else:
67
+ ckpt = torch.hub.load_state_dict_from_url(checkpoint_path)
68
+
69
+ if isinstance(ckpt, Encoder):
70
+ ckpt = ckpt.state_dict()
71
+ encoder.load_state_dict(ckpt)
72
+
73
+ if config_path is not None:
74
+ config = FlavaImageCodebookConfig.from_pretrained(config_path)
75
+ else:
76
+ config = FlavaImageCodebookConfig()
77
+
78
+ hf_model = FlavaImageCodebook(config).eval()
79
+ state_dict = encoder.state_dict()
80
+
81
+ hf_state_dict = upgrade_state_dict(state_dict)
82
+ hf_model.load_state_dict(hf_state_dict)
83
+ hf_state_dict = hf_model.state_dict()
84
+ hf_count = count_parameters(hf_state_dict)
85
+ state_dict_count = count_parameters(state_dict)
86
+
87
+ assert torch.allclose(hf_count, state_dict_count, atol=1e-3)
88
+
89
+ if save_checkpoint:
90
+ hf_model.save_pretrained(pytorch_dump_folder_path)
91
+ else:
92
+ return hf_state_dict
93
+
94
+
95
+ if __name__ == "__main__":
96
+ parser = argparse.ArgumentParser()
97
+ parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
98
+ parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
99
+ parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
100
+ args = parser.parse_args()
101
+
102
+ convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
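The script above can also be driven programmatically: passing `save_checkpoint=False` makes `convert_dalle_checkpoint` return the converted state dict instead of writing a model directory. A hedged sketch, where the checkpoint path is a hypothetical placeholder and the `dall_e` package must be installed:

```python
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint

# Hypothetical path to a DALL-E encoder checkpoint; replace with a real file or URL.
dalle_encoder_checkpoint = "encoder.pkl"

# Returns the state dict of the converted FlavaImageCodebook without saving it to disk.
codebook_state_dict = convert_dalle_checkpoint(
    checkpoint_path=dalle_encoder_checkpoint,
    pytorch_dump_folder_path=None,
    save_checkpoint=False,
)
print(f"{len(codebook_state_dict)} tensors converted")
```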
venv/lib/python3.10/site-packages/transformers/models/flava/convert_flava_original_pytorch_to_hf.py ADDED
@@ -0,0 +1,99 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 Meta Platforms authors and The HuggingFace Team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ import argparse
17
+ import os
18
+
19
+ import torch
20
+
21
+ from transformers import FlavaConfig, FlavaForPreTraining
22
+ from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
23
+
24
+
25
+ def count_parameters(state_dict):
26
+ # encoder.embeddings are double copied in original FLAVA
27
+ return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())
28
+
29
+
30
+ def upgrade_state_dict(state_dict, codebook_state_dict):
31
+ upgrade = {}
32
+
33
+ for key, value in state_dict.items():
34
+ if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
35
+ continue
36
+
37
+ key = key.replace("heads.cmd.mim_head.cls.predictions", "mmm_image_head")
38
+ key = key.replace("heads.cmd.mlm_head.cls.predictions", "mmm_text_head")
39
+ key = key.replace("heads.cmd.itm_head.cls", "itm_head")
40
+ key = key.replace("heads.cmd.itm_head.pooler", "itm_head.pooler")
41
+ key = key.replace("heads.cmd.clip_head.logit_scale", "flava.logit_scale")
42
+ key = key.replace("heads.fairseq_mlm.cls.predictions", "mlm_head")
43
+ key = key.replace("heads.imagenet.mim_head.cls.predictions", "mim_head")
44
+ key = key.replace("mm_text_projection", "flava.text_to_mm_projection")
45
+ key = key.replace("mm_image_projection", "flava.image_to_mm_projection")
46
+ key = key.replace("image_encoder.module", "flava.image_model")
47
+ key = key.replace("text_encoder.module", "flava.text_model")
48
+ key = key.replace("mm_encoder.module.encoder.cls_token", "flava.multimodal_model.cls_token")
49
+ key = key.replace("mm_encoder.module", "flava.multimodal_model")
50
+ key = key.replace("text_projection", "flava.text_projection")
51
+ key = key.replace("image_projection", "flava.image_projection")
52
+
53
+ upgrade[key] = value.float()
54
+
55
+ for key, value in codebook_state_dict.items():
56
+ upgrade[f"image_codebook.{key}"] = value
57
+
58
+ return upgrade
59
+
60
+
61
+ @torch.no_grad()
62
+ def convert_flava_checkpoint(checkpoint_path, codebook_path, pytorch_dump_folder_path, config_path=None):
63
+ """
64
+ Copy/paste/tweak model's weights to transformers design.
65
+ """
66
+ if config_path is not None:
67
+ config = FlavaConfig.from_pretrained(config_path)
68
+ else:
69
+ config = FlavaConfig()
70
+
71
+ hf_model = FlavaForPreTraining(config).eval()
72
+
73
+ codebook_state_dict = convert_dalle_checkpoint(codebook_path, None, save_checkpoint=False)
74
+
75
+ if os.path.exists(checkpoint_path):
76
+ state_dict = torch.load(checkpoint_path, map_location="cpu")
77
+ else:
78
+ state_dict = torch.hub.load_state_dict_from_url(checkpoint_path, map_location="cpu")
79
+
80
+ hf_state_dict = upgrade_state_dict(state_dict, codebook_state_dict)
81
+ hf_model.load_state_dict(hf_state_dict)
82
+ hf_state_dict = hf_model.state_dict()
83
+ hf_count = count_parameters(hf_state_dict)
84
+ state_dict_count = count_parameters(state_dict) + count_parameters(codebook_state_dict)
85
+
86
+ assert torch.allclose(hf_count, state_dict_count, atol=1e-3)
87
+
88
+ hf_model.save_pretrained(pytorch_dump_folder_path)
89
+
90
+
91
+ if __name__ == "__main__":
92
+ parser = argparse.ArgumentParser()
93
+ parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
94
+ parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
95
+ parser.add_argument("--codebook_path", default=None, type=str, help="Path to flava codebook checkpoint")
96
+ parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
97
+ args = parser.parse_args()
98
+
99
+ convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
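As a rough sketch of how the two conversion scripts fit together: `convert_flava_checkpoint` first converts the DALL-E codebook through the script above, then renames the keys of the original FLAVA checkpoint and saves a `FlavaForPreTraining` model. All paths below are hypothetical placeholders:

```python
from transformers.models.flava.convert_flava_original_pytorch_to_hf import convert_flava_checkpoint

convert_flava_checkpoint(
    checkpoint_path="flava_original.pt",    # original FLAVA pretraining checkpoint (placeholder)
    codebook_path="dalle_encoder.pkl",      # DALL-E encoder checkpoint for the codebook (placeholder)
    pytorch_dump_folder_path="./flava-hf",  # output directory for the converted Hugging Face model
)
```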
venv/lib/python3.10/site-packages/transformers/models/flava/feature_extraction_flava.py ADDED
@@ -0,0 +1,33 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 Meta Platforms authors and The HuggingFace Team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Feature extractor class for FLAVA."""
16
+
17
+ import warnings
18
+
19
+ from ...utils import logging
20
+ from .image_processing_flava import FlavaImageProcessor
21
+
22
+
23
+ logger = logging.get_logger(__name__)
24
+
25
+
26
+ class FlavaFeatureExtractor(FlavaImageProcessor):
27
+ def __init__(self, *args, **kwargs) -> None:
28
+ warnings.warn(
29
+ "The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
30
+ " use FlavaImageProcessor instead.",
31
+ FutureWarning,
32
+ )
33
+ super().__init__(*args, **kwargs)
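Since `FlavaFeatureExtractor` is only a deprecated alias that forwards to `FlavaImageProcessor`, new code should construct the image processor directly. A small sketch of the difference:

```python
import warnings

from transformers import FlavaFeatureExtractor, FlavaImageProcessor

# Preferred: instantiate the image processor directly.
image_processor = FlavaImageProcessor()

# The legacy class still works, but emits a FutureWarning on construction.
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    legacy_extractor = FlavaFeatureExtractor()

print(any(issubclass(w.category, FutureWarning) for w in caught))  # True
```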
venv/lib/python3.10/site-packages/transformers/models/flava/image_processing_flava.py ADDED
@@ -0,0 +1,738 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Image processor class for Flava."""
16
+
17
+ import math
18
+ import random
19
+ from functools import lru_cache
20
+ from typing import Any, Dict, Iterable, List, Optional, Tuple, Union
21
+
22
+ import numpy as np
23
+
24
+ from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
25
+ from ...image_transforms import resize, to_channel_dimension_format
26
+ from ...image_utils import (
27
+ OPENAI_CLIP_MEAN,
28
+ OPENAI_CLIP_STD,
29
+ ChannelDimension,
30
+ ImageInput,
31
+ PILImageResampling,
32
+ infer_channel_dimension_format,
33
+ is_scaled_image,
34
+ make_list_of_images,
35
+ to_numpy_array,
36
+ valid_images,
37
+ validate_kwargs,
38
+ validate_preprocess_arguments,
39
+ )
40
+ from ...utils import TensorType, is_vision_available, logging
41
+
42
+
43
+ if is_vision_available():
44
+ import PIL
45
+
46
+
47
+ logger = logging.get_logger(__name__)
48
+
49
+
50
+ # These values are taken from CLIP
51
+ FLAVA_IMAGE_MEAN = OPENAI_CLIP_MEAN
52
+ FLAVA_IMAGE_STD = OPENAI_CLIP_STD
53
+ FLAVA_CODEBOOK_MEAN = [0.0, 0.0, 0.0]
54
+ FLAVA_CODEBOOK_STD = [1.0, 1.0, 1.0]
55
+ LOGIT_LAPLACE_EPS: float = 0.1
56
+
57
+
58
+ # Inspired from https://github.com/microsoft/unilm/blob/master/beit/masking_generator.py
59
+ class FlavaMaskingGenerator:
60
+ def __init__(
61
+ self,
62
+ input_size: Union[int, Tuple[int, int]] = 14,
63
+ total_mask_patches: int = 75,
64
+ mask_group_max_patches: Optional[int] = None,
65
+ mask_group_min_patches: int = 16,
66
+ mask_group_min_aspect_ratio: Optional[float] = 0.3,
67
+ mask_group_max_aspect_ratio: Optional[float] = None,
68
+ ):
69
+ if not isinstance(input_size, tuple):
70
+ input_size = (input_size,) * 2
71
+ self.height, self.width = input_size
72
+
73
+ self.num_patches = self.height * self.width
74
+ self.total_mask_patches = total_mask_patches
75
+
76
+ self.mask_group_min_patches = mask_group_min_patches
77
+ self.mask_group_max_patches = total_mask_patches if mask_group_max_patches is None else mask_group_max_patches
78
+
79
+ mask_group_max_aspect_ratio = mask_group_max_aspect_ratio or 1 / mask_group_min_aspect_ratio
80
+ self.log_aspect_ratio = (math.log(mask_group_min_aspect_ratio), math.log(mask_group_max_aspect_ratio))
81
+
82
+ def __repr__(self):
83
+ repr_str = "MaskingGenerator(%d, %d -> [%d ~ %d], max = %d, %.3f ~ %.3f)" % (
84
+ self.height,
85
+ self.width,
86
+ self.mask_group_min_patches,
87
+ self.mask_group_max_patches,
88
+ self.total_mask_patches,
89
+ self.log_aspect_ratio[0],
90
+ self.log_aspect_ratio[1],
91
+ )
92
+ return repr_str
93
+
94
+ def get_shape(self):
95
+ return self.height, self.width
96
+
97
+ def _mask(self, mask, max_mask_patches):
98
+ delta = 0
99
+ for _attempt in range(10):
100
+ target_area = random.uniform(self.mask_group_min_patches, max_mask_patches)
101
+ aspect_ratio = math.exp(random.uniform(*self.log_aspect_ratio))
102
+ height = int(round(math.sqrt(target_area * aspect_ratio)))
103
+ width = int(round(math.sqrt(target_area / aspect_ratio)))
104
+ if width < self.width and height < self.height:
105
+ top = random.randint(0, self.height - height)
106
+ left = random.randint(0, self.width - width)
107
+
108
+ num_masked = mask[top : top + height, left : left + width].sum()
109
+ # Overlap
110
+ if 0 < height * width - num_masked <= max_mask_patches:
111
+ for i in range(top, top + height):
112
+ for j in range(left, left + width):
113
+ if mask[i, j] == 0:
114
+ mask[i, j] = 1
115
+ delta += 1
116
+
117
+ if delta > 0:
118
+ break
119
+ return delta
120
+
121
+ def __call__(self):
122
+ mask = np.zeros(shape=self.get_shape(), dtype=int)
123
+ mask_count = 0
124
+ while mask_count < self.total_mask_patches:
125
+ max_mask_patches = self.total_mask_patches - mask_count
126
+ max_mask_patches = min(max_mask_patches, self.mask_group_max_patches)
127
+
128
+ delta = self._mask(mask, max_mask_patches)
129
+ if delta == 0:
130
+ break
131
+ else:
132
+ mask_count += delta
133
+
134
+ return mask
135
+
136
+
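`FlavaMaskingGenerator` produces the block-wise mask used for masked image modeling: each call keeps adding rectangular groups of patches until roughly `total_mask_patches` positions are masked. A quick illustrative sketch (note the import comes from this internal module, not the public `transformers` namespace):

```python
from transformers.models.flava.image_processing_flava import FlavaMaskingGenerator

# 14x14 patch grid, aiming for ~75 masked patches in groups of at least 16 patches each.
mask_generator = FlavaMaskingGenerator(
    input_size=14,
    total_mask_patches=75,
    mask_group_min_patches=16,
    mask_group_min_aspect_ratio=0.3,
)

mask = mask_generator()             # numpy array of shape (14, 14) with 0/1 entries
print(mask.shape, int(mask.sum()))  # (14, 14) and approximately 75 masked patches
```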
137
+ class FlavaImageProcessor(BaseImageProcessor):
138
+ r"""
139
+ Constructs a Flava image processor.
140
+
141
+ Args:
142
+ do_resize (`bool`, *optional*, defaults to `True`):
143
+ Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by the
144
+ `do_resize` parameter in `preprocess`.
145
+ size (`Dict[str, int]`, *optional*, defaults to `{"height": 224, "width": 224}`):
146
+ Size of the image after resizing. Can be overridden by the `size` parameter in `preprocess`.
147
+ resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
148
+ Resampling filter to use if resizing the image. Can be overridden by the `resample` parameter in
149
+ `preprocess`.
150
+ do_center_crop (`bool`, *optional*, defaults to `True`):
151
+ Whether to center crop the images. Can be overridden by the `do_center_crop` parameter in `preprocess`.
152
+ crop_size (`Dict[str, int]`, *optional*, defaults to `{"height": 224, "width": 224}`):
153
+ Size of image after the center crop `(crop_size["height"], crop_size["width"])`. Can be overridden by the
154
+ `crop_size` parameter in `preprocess`.
155
+ do_rescale (`bool`, *optional*, defaults to `True`):
156
+ Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale`
157
+ parameter in `preprocess`.
158
+ rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
159
+ Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in
160
+ `preprocess`.
161
+ do_normalize (`bool`, *optional*, defaults to `True`):
162
+ Whether to normalize the image. Can be overridden by the `do_normalize` parameter in `preprocess`.
163
+ image_mean (`float` or `List[float]`, *optional*, defaults to `OPENAI_CLIP_MEAN`):
164
+ Mean to use if normalizing the image. This is a float or list of floats the length of the number of
165
+ channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
166
+ image_std (`float` or `List[float]`, *optional*, defaults to `OPENAI_CLIP_STD`):
167
+ Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
168
+ number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
169
+ return_image_mask (`bool`, *optional*, defaults to `False`):
170
+ Whether to return the image mask. Can be overridden by the `return_image_mask` parameter in `preprocess`.
171
+ input_size_patches (`int`, *optional*, defaults to 14):
172
+ Number of patches in the image in height and width direction. 14x14 = 196 total patches. Can be overridden
173
+ by the `input_size_patches` parameter in `preprocess`.
174
+ total_mask_patches (`int`, *optional*, defaults to 75):
175
+ Total number of patches that should be masked. Can be overridden by the `total_mask_patches` parameter in
176
+ `preprocess`.
177
+ mask_group_min_patches (`int`, *optional*, defaults to 16):
178
+ Minimum number of patches that should be masked. Can be overridden by the `mask_group_min_patches`
179
+ parameter in `preprocess`.
180
+ mask_group_max_patches (`int`, *optional*):
181
+ Maximum number of patches that should be masked. Can be overridden by the `mask_group_max_patches`
182
+ parameter in `preprocess`.
183
+ mask_group_min_aspect_ratio (`float`, *optional*, defaults to 0.3):
184
+ Minimum aspect ratio of the mask window. Can be overridden by the `mask_group_min_aspect_ratio` parameter
185
+ in `preprocess`.
186
+ mask_group_max_aspect_ratio (`float`, *optional*):
187
+ Maximum aspect ratio of the mask window. Can be overridden by the `mask_group_max_aspect_ratio` parameter
188
+ in `preprocess`.
189
+ codebook_do_resize (`bool`, *optional*, defaults to `True`):
190
+ Whether to resize the input for codebook to a certain `codebook_size`. Can be overridden by the
191
+ `codebook_do_resize` parameter in `preprocess`.
192
+ codebook_size (`Dict[str, int]`, *optional*, defaults to `{"height": 112, "width": 112}`):
193
+ Resize the input for codebook to the given size. Can be overridden by the `codebook_size` parameter in
194
+ `preprocess`.
195
+ codebook_resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.LANCZOS`):
196
+ Resampling filter to use if resizing the codebook image. Can be overridden by the `codebook_resample`
197
+ parameter in `preprocess`.
198
+ codebook_do_center_crop (`bool`, *optional*, defaults to `True`):
199
+ Whether to crop the input for codebook at the center. If the input size is smaller than
200
+ `codebook_crop_size` along any edge, the image is padded with 0's and then center cropped. Can be
201
+ overridden by the `codebook_do_center_crop` parameter in `preprocess`.
202
+ codebook_crop_size (`Dict[str, int]`, *optional*, defaults to `{"height": 112, "width": 112}`):
203
+ Desired output size for codebook input when applying center-cropping. Can be overridden by the
204
+ `codebook_crop_size` parameter in `preprocess`.
205
+ codebook_do_rescale (`bool`, *optional*, defaults to `True`):
206
+ Whether to rescale the input for codebook by the specified scale `codebook_rescale_factor`. Can be
207
+ overridden by the `codebook_do_rescale` parameter in `preprocess`.
208
+ codebook_rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
209
+ Defines the scale factor to use if rescaling the codebook image. Can be overridden by the
210
+ `codebook_rescale_factor` parameter in `preprocess`.
211
+ codebook_do_map_pixels (`bool`, *optional*, defaults to `True`):
212
+ Whether to map the pixel values of the codebook input to (1 - 2e)x + e. Can be overridden by the
213
+ `codebook_do_map_pixels` parameter in `preprocess`.
214
+ codebook_do_normalize (`bool`, *optional*, defaults to `True`):
215
+ Whether or not to normalize the input for codebook with `codebook_image_mean` and `codebook_image_std`. Can
216
+ be overridden by the `codebook_do_normalize` parameter in `preprocess`.
217
+ codebook_image_mean (`Optional[Union[float, Iterable[float]]]`, *optional*, defaults to `[0, 0, 0]`):
218
+ The sequence of means for each channel, to be used when normalizing images for codebook. Can be overridden
219
+ by the `codebook_image_mean` parameter in `preprocess`.
220
+ codebook_image_std (`Optional[Union[float, Iterable[float]]]`, *optional*, defaults to `[1.0, 1.0, 1.0]`):
221
+ The sequence of standard deviations for each channel, to be used when normalizing images for codebook. Can
222
+ be overridden by the `codebook_image_std` parameter in `preprocess`.
223
+ """
224
+
225
+ model_input_names = ["pixel_values"]
226
+
227
+ def __init__(
228
+ self,
229
+ do_resize: bool = True,
230
+ size: Dict[str, int] = None,
231
+ resample: PILImageResampling = PILImageResampling.BICUBIC,
232
+ do_center_crop: bool = True,
233
+ crop_size: Dict[str, int] = None,
234
+ do_rescale: bool = True,
235
+ rescale_factor: Union[int, float] = 1 / 255,
236
+ do_normalize: bool = True,
237
+ image_mean: Optional[Union[float, Iterable[float]]] = None,
238
+ image_std: Optional[Union[float, Iterable[float]]] = None,
239
+ # Mask related params
240
+ return_image_mask: bool = False,
241
+ input_size_patches: int = 14,
242
+ total_mask_patches: int = 75,
243
+ mask_group_min_patches: int = 16,
244
+ mask_group_max_patches: Optional[int] = None,
245
+ mask_group_min_aspect_ratio: float = 0.3,
246
+ mask_group_max_aspect_ratio: Optional[float] = None,
247
+ # Codebook related params
248
+ return_codebook_pixels: bool = False,
249
+ codebook_do_resize: bool = True,
250
+ codebook_size: Optional[Dict[str, int]] = None,
251
+ codebook_resample: PILImageResampling = PILImageResampling.LANCZOS,
252
+ codebook_do_center_crop: bool = True,
253
+ codebook_crop_size: Optional[Dict[str, int]] = None,
254
+ codebook_do_rescale: bool = True,
255
+ codebook_rescale_factor: Union[int, float] = 1 / 255,
256
+ codebook_do_map_pixels: bool = True,
257
+ codebook_do_normalize: bool = True,
258
+ codebook_image_mean: Optional[Union[float, Iterable[float]]] = None,
259
+ codebook_image_std: Optional[Union[float, Iterable[float]]] = None,
260
+ **kwargs,
261
+ ) -> None:
262
+ super().__init__(**kwargs)
263
+ size = size if size is not None else {"height": 224, "width": 224}
264
+ size = get_size_dict(size)
265
+ crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
266
+ crop_size = get_size_dict(crop_size, param_name="crop_size")
267
+
268
+ codebook_size = codebook_size if codebook_size is not None else {"height": 112, "width": 112}
269
+ codebook_size = get_size_dict(codebook_size, param_name="codebook_size")
270
+ codebook_crop_size = codebook_crop_size if codebook_crop_size is not None else {"height": 112, "width": 112}
271
+ codebook_crop_size = get_size_dict(codebook_crop_size, param_name="codebook_crop_size")
272
+
273
+ self.do_resize = do_resize
274
+ self.size = size
275
+ self.resample = resample
276
+ self.do_rescale = do_rescale
277
+ self.rescale_factor = rescale_factor
278
+ self.do_center_crop = do_center_crop
279
+ self.crop_size = crop_size
280
+ self.do_normalize = do_normalize
281
+ self.image_mean = image_mean if image_mean is not None else FLAVA_IMAGE_MEAN
282
+ self.image_std = image_std if image_std is not None else FLAVA_IMAGE_STD
283
+
284
+ self.return_image_mask = return_image_mask
285
+ self.input_size_patches = input_size_patches
286
+ self.total_mask_patches = total_mask_patches
287
+ self.mask_group_min_patches = mask_group_min_patches
288
+ self.mask_group_max_patches = mask_group_max_patches
289
+ self.mask_group_min_aspect_ratio = mask_group_min_aspect_ratio
290
+ self.mask_group_max_aspect_ratio = mask_group_max_aspect_ratio
291
+
292
+ self.return_codebook_pixels = return_codebook_pixels
293
+ self.codebook_do_resize = codebook_do_resize
294
+ self.codebook_size = codebook_size
295
+ self.codebook_resample = codebook_resample
296
+ self.codebook_do_center_crop = codebook_do_center_crop
297
+ self.codebook_crop_size = codebook_crop_size
298
+ self.codebook_do_rescale = codebook_do_rescale
299
+ self.codebook_rescale_factor = codebook_rescale_factor
300
+ self.codebook_do_map_pixels = codebook_do_map_pixels
301
+ self.codebook_do_normalize = codebook_do_normalize
302
+ self.codebook_image_mean = codebook_image_mean if codebook_image_mean is not None else FLAVA_CODEBOOK_MEAN
304
+ self.codebook_image_std = codebook_image_std if codebook_image_std is not None else FLAVA_CODEBOOK_STD
305
+ self._valid_processor_keys = [
306
+ "images",
307
+ "do_resize",
308
+ "size",
309
+ "resample",
310
+ "do_center_crop",
311
+ "crop_size",
312
+ "do_rescale",
313
+ "rescale_factor",
314
+ "do_normalize",
315
+ "image_mean",
316
+ "image_std",
317
+ "return_image_mask",
318
+ "input_size_patches",
319
+ "total_mask_patches",
320
+ "mask_group_min_patches",
321
+ "mask_group_max_patches",
322
+ "mask_group_min_aspect_ratio",
323
+ "mask_group_max_aspect_ratio",
324
+ "return_codebook_pixels",
325
+ "codebook_do_resize",
326
+ "codebook_size",
327
+ "codebook_resample",
328
+ "codebook_do_center_crop",
329
+ "codebook_crop_size",
330
+ "codebook_do_rescale",
331
+ "codebook_rescale_factor",
332
+ "codebook_do_map_pixels",
333
+ "codebook_do_normalize",
334
+ "codebook_image_mean",
335
+ "codebook_image_std",
336
+ "return_tensors",
337
+ "data_format",
338
+ "input_data_format",
339
+ ]
340
+
341
+ @classmethod
342
+ def from_dict(cls, image_processor_dict: Dict[str, Any], **kwargs):
343
+ """
344
+ Overrides the `from_dict` method from the base class to make sure parameters are updated if image processor is
345
+ created using from_dict and kwargs e.g. `FlavaImageProcessor.from_pretrained(checkpoint, codebook_size=600)`
346
+ """
347
+ image_processor_dict = image_processor_dict.copy()
348
+ if "codebook_size" in kwargs:
349
+ image_processor_dict["codebook_size"] = kwargs.pop("codebook_size")
350
+ if "codebook_crop_size" in kwargs:
351
+ image_processor_dict["codebook_crop_size"] = kwargs.pop("codebook_crop_size")
352
+ return super().from_dict(image_processor_dict, **kwargs)
353
+
354
+ @lru_cache()
355
+ def masking_generator(
356
+ self,
357
+ input_size_patches,
358
+ total_mask_patches,
359
+ mask_group_min_patches,
360
+ mask_group_max_patches,
361
+ mask_group_min_aspect_ratio,
362
+ mask_group_max_aspect_ratio,
363
+ ) -> FlavaMaskingGenerator:
364
+ return FlavaMaskingGenerator(
365
+ input_size=input_size_patches,
366
+ total_mask_patches=total_mask_patches,
367
+ mask_group_min_patches=mask_group_min_patches,
368
+ mask_group_max_patches=mask_group_max_patches,
369
+ mask_group_min_aspect_ratio=mask_group_min_aspect_ratio,
370
+ mask_group_max_aspect_ratio=mask_group_max_aspect_ratio,
371
+ )
372
+
373
+ # Copied from transformers.models.vit.image_processing_vit.ViTImageProcessor.resize with PILImageResampling.BILINEAR->PILImageResampling.BICUBIC
374
+ def resize(
375
+ self,
376
+ image: np.ndarray,
377
+ size: Dict[str, int],
378
+ resample: PILImageResampling = PILImageResampling.BICUBIC,
379
+ data_format: Optional[Union[str, ChannelDimension]] = None,
380
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
381
+ **kwargs,
382
+ ) -> np.ndarray:
383
+ """
384
+ Resize an image to `(size["height"], size["width"])`.
385
+
386
+ Args:
387
+ image (`np.ndarray`):
388
+ Image to resize.
389
+ size (`Dict[str, int]`):
390
+ Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image.
391
+ resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
392
+ `PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BICUBIC`.
393
+ data_format (`ChannelDimension` or `str`, *optional*):
394
+ The channel dimension format for the output image. If unset, the channel dimension format of the input
395
+ image is used. Can be one of:
396
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
397
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
398
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
399
+ input_data_format (`ChannelDimension` or `str`, *optional*):
400
+ The channel dimension format for the input image. If unset, the channel dimension format is inferred
401
+ from the input image. Can be one of:
402
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
403
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
404
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
405
+
406
+ Returns:
407
+ `np.ndarray`: The resized image.
408
+ """
409
+ size = get_size_dict(size)
410
+ if "height" not in size or "width" not in size:
411
+ raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
412
+ output_size = (size["height"], size["width"])
413
+ return resize(
414
+ image,
415
+ size=output_size,
416
+ resample=resample,
417
+ data_format=data_format,
418
+ input_data_format=input_data_format,
419
+ **kwargs,
420
+ )
421
+
422
+ def map_pixels(self, image: np.ndarray) -> np.ndarray:
423
+ return (1 - 2 * LOGIT_LAPLACE_EPS) * image + LOGIT_LAPLACE_EPS
424
+
425
+ def _preprocess_image(
426
+ self,
427
+ image: ImageInput,
428
+ do_resize: bool = None,
429
+ size: Dict[str, int] = None,
430
+ resample: PILImageResampling = None,
431
+ do_center_crop: bool = None,
432
+ crop_size: Dict[str, int] = None,
433
+ do_rescale: bool = None,
434
+ rescale_factor: float = None,
435
+ do_normalize: bool = None,
436
+ image_mean: Optional[Union[float, List[float]]] = None,
437
+ image_std: Optional[Union[float, List[float]]] = None,
438
+ do_map_pixels: bool = None,
439
+ data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
440
+ input_data_format: Optional[ChannelDimension] = None,
441
+ ) -> np.ndarray:
442
+ """Preprocesses a single image."""
443
+
444
+ validate_preprocess_arguments(
445
+ do_rescale=do_rescale,
446
+ rescale_factor=rescale_factor,
447
+ do_normalize=do_normalize,
448
+ image_mean=image_mean,
449
+ image_std=image_std,
450
+ do_center_crop=do_center_crop,
451
+ crop_size=crop_size,
452
+ do_resize=do_resize,
453
+ size=size,
454
+ resample=resample,
455
+ )
456
+
457
+ # All transformations expect numpy arrays.
458
+ image = to_numpy_array(image)
459
+
460
+ if is_scaled_image(image) and do_rescale:
461
+ logger.warning_once(
462
+ "It looks like you are trying to rescale already rescaled images. If the input"
463
+ " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
464
+ )
465
+
466
+ if input_data_format is None:
467
+ # We assume that all images have the same channel dimension format.
468
+ input_data_format = infer_channel_dimension_format(image)
469
+
470
+ if do_resize:
471
+ image = self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)
472
+
473
+ if do_center_crop:
474
+ image = self.center_crop(image=image, size=crop_size, input_data_format=input_data_format)
475
+
476
+ if do_rescale:
477
+ image = self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
478
+
479
+ if do_normalize:
480
+ image = self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)
481
+
482
+ if do_map_pixels:
483
+ image = self.map_pixels(image)
484
+
485
+ if data_format is not None:
486
+ image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)
487
+ return image
488
+
489
+ def preprocess(
490
+ self,
491
+ images: ImageInput,
492
+ do_resize: Optional[bool] = None,
493
+ size: Dict[str, int] = None,
494
+ resample: PILImageResampling = None,
495
+ do_center_crop: Optional[bool] = None,
496
+ crop_size: Optional[Dict[str, int]] = None,
497
+ do_rescale: Optional[bool] = None,
498
+ rescale_factor: Optional[float] = None,
499
+ do_normalize: Optional[bool] = None,
500
+ image_mean: Optional[Union[float, List[float]]] = None,
501
+ image_std: Optional[Union[float, List[float]]] = None,
502
+ # Mask related params
503
+ return_image_mask: Optional[bool] = None,
504
+ input_size_patches: Optional[int] = None,
505
+ total_mask_patches: Optional[int] = None,
506
+ mask_group_min_patches: Optional[int] = None,
507
+ mask_group_max_patches: Optional[int] = None,
508
+ mask_group_min_aspect_ratio: Optional[float] = None,
509
+ mask_group_max_aspect_ratio: Optional[float] = None,
510
+ # Codebook related params
511
+ return_codebook_pixels: Optional[bool] = None,
512
+ codebook_do_resize: Optional[bool] = None,
513
+ codebook_size: Optional[Dict[str, int]] = None,
514
+ codebook_resample: Optional[int] = None,
515
+ codebook_do_center_crop: Optional[bool] = None,
516
+ codebook_crop_size: Optional[Dict[str, int]] = None,
517
+ codebook_do_rescale: Optional[bool] = None,
518
+ codebook_rescale_factor: Optional[float] = None,
519
+ codebook_do_map_pixels: Optional[bool] = None,
520
+ codebook_do_normalize: Optional[bool] = None,
521
+ codebook_image_mean: Optional[Iterable[float]] = None,
522
+ codebook_image_std: Optional[Iterable[float]] = None,
523
+ return_tensors: Optional[Union[str, TensorType]] = None,
524
+ data_format: ChannelDimension = ChannelDimension.FIRST,
525
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
526
+ **kwargs,
527
+ ) -> BatchFeature:
528
+ """
529
+ Preprocess an image or batch of images.
530
+
531
+ Args:
532
+ images (`ImageInput`):
533
+ Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
534
+ passing in images with pixel values between 0 and 1, set `do_rescale=False`.
535
+ do_resize (`bool`, *optional*, defaults to `self.do_resize`):
536
+ Whether to resize the image.
537
+ size (`Dict[str, int]`, *optional*, defaults to `self.size`):
538
+ Size of the image.
539
+ resample (`int`, *optional*, defaults to `self.resample`):
540
+ Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only
541
+ has an effect if `do_resize` is set to `True`.
542
+ do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):
543
+ Whether to center crop the image.
544
+ crop_size (`Dict[str, int]`, *optional*, defaults to `self.crop_size`):
545
+ Size of the center crop. Only has an effect if `do_center_crop` is set to `True`.
546
+ do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
547
+ Whether to rescale the image values to the [0, 1] range.
548
+ rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
549
+ Rescale factor to rescale the image by if `do_rescale` is set to `True`.
550
+ do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
551
+ Whether to normalize the image.
552
+ image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
553
+ Image mean.
554
+ image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
555
+ Image standard deviation.
556
+ return_image_mask (`bool`, *optional*, defaults to `self.return_image_mask`):
557
+ Whether to return the image mask.
558
+ input_size_patches (`int`, *optional*, defaults to `self.input_size_patches`):
559
+ Size of the patches to extract from the image.
560
+ total_mask_patches (`int`, *optional*, defaults to `self.total_mask_patches`):
561
+ Total number of patches to extract from the image.
562
+ mask_group_min_patches (`int`, *optional*, defaults to `self.mask_group_min_patches`):
563
+ Minimum number of patches to extract from the image.
564
+ mask_group_max_patches (`int`, *optional*, defaults to `self.mask_group_max_patches`):
565
+ Maximum number of patches to extract from the image.
566
+ mask_group_min_aspect_ratio (`float`, *optional*, defaults to `self.mask_group_min_aspect_ratio`):
567
+ Minimum aspect ratio of the patches to extract from the image.
568
+ mask_group_max_aspect_ratio (`float`, *optional*, defaults to `self.mask_group_max_aspect_ratio`):
569
+ Maximum aspect ratio of the patches to extract from the image.
570
+ return_codebook_pixels (`bool`, *optional*, defaults to `self.return_codebook_pixels`):
571
+ Whether to return the codebook pixels.
572
+ codebook_do_resize (`bool`, *optional*, defaults to `self.codebook_do_resize`):
573
+ Whether to resize the codebook pixels.
574
+ codebook_size (`Dict[str, int]`, *optional*, defaults to `self.codebook_size`):
575
+ Size of the codebook pixels.
576
+ codebook_resample (`int`, *optional*, defaults to `self.codebook_resample`):
577
+ Resampling filter to use if resizing the codebook pixels. This can be one of the enum
578
+ `PILImageResampling`. Only has an effect if `codebook_do_resize` is set to `True`.
579
+ codebook_do_center_crop (`bool`, *optional*, defaults to `self.codebook_do_center_crop`):
580
+ Whether to center crop the codebook pixels.
581
+ codebook_crop_size (`Dict[str, int]`, *optional*, defaults to `self.codebook_crop_size`):
582
+ Size of the center crop of the codebook pixels. Only has an effect if `codebook_do_center_crop` is set
583
+ to `True`.
584
+ codebook_do_rescale (`bool`, *optional*, defaults to `self.codebook_do_rescale`):
585
+ Whether to rescale the codebook pixel values to the [0, 1] range.
586
+ codebook_rescale_factor (`float`, *optional*, defaults to `self.codebook_rescale_factor`):
587
+ Rescale factor to rescale the codebook pixels by if `codebook_do_rescale` is set to `True`.
588
+ codebook_do_map_pixels (`bool`, *optional*, defaults to `self.codebook_do_map_pixels`):
589
+ Whether to map the codebook pixel values.
590
+ codebook_do_normalize (`bool`, *optional*, defaults to `self.codebook_do_normalize`):
591
+ Whether to normalize the codebook pixels.
592
+ codebook_image_mean (`float` or `List[float]`, *optional*, defaults to `self.codebook_image_mean`):
593
+ Codebook pixels mean to normalize the codebook pixels by if `codebook_do_normalize` is set to `True`.
594
+ codebook_image_std (`float` or `List[float]`, *optional*, defaults to `self.codebook_image_std`):
595
+ Codebook pixels standard deviation to normalize the codebook pixels by if `codebook_do_normalize` is
596
+ set to `True`.
597
+ return_tensors (`str` or `TensorType`, *optional*):
598
+ The type of tensors to return. Can be one of:
599
+ - Unset: Return a list of `np.ndarray`.
600
+ - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
601
+ - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
602
+ - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
603
+ - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
604
+ data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
605
+ The channel dimension format for the output image. Can be one of:
606
+ - `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
607
+ - `ChannelDimension.LAST`: image in (height, width, num_channels) format.
608
+ input_data_format (`ChannelDimension` or `str`, *optional*):
609
+ The channel dimension format for the input image. If unset, the channel dimension format is inferred
610
+ from the input image. Can be one of:
611
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
612
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
613
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
614
+ """
615
+ do_resize = do_resize if do_resize is not None else self.do_resize
616
+ size = size if size is not None else self.size
617
+ size = get_size_dict(size)
618
+ resample = resample if resample is not None else self.resample
619
+ do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
620
+ crop_size = crop_size if crop_size is not None else self.crop_size
621
+ crop_size = get_size_dict(crop_size, param_name="crop_size")
622
+ do_rescale = do_rescale if do_rescale is not None else self.do_rescale
623
+ rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
624
+ do_normalize = do_normalize if do_normalize is not None else self.do_normalize
625
+ image_mean = image_mean if image_mean is not None else self.image_mean
626
+ image_std = image_std if image_std is not None else self.image_std
627
+
628
+ return_image_mask = return_image_mask if return_image_mask is not None else self.return_image_mask
629
+ input_size_patches = input_size_patches if input_size_patches is not None else self.input_size_patches
630
+ total_mask_patches = total_mask_patches if total_mask_patches is not None else self.total_mask_patches
631
+ mask_group_min_patches = (
632
+ mask_group_min_patches if mask_group_min_patches is not None else self.mask_group_min_patches
633
+ )
634
+ mask_group_max_patches = (
635
+ mask_group_max_patches if mask_group_max_patches is not None else self.mask_group_max_patches
636
+ )
637
+ mask_group_min_aspect_ratio = (
638
+ mask_group_min_aspect_ratio
639
+ if mask_group_min_aspect_ratio is not None
640
+ else self.mask_group_min_aspect_ratio
641
+ )
642
+ mask_group_max_aspect_ratio = (
643
+ mask_group_max_aspect_ratio
644
+ if mask_group_max_aspect_ratio is not None
645
+ else self.mask_group_max_aspect_ratio
646
+ )
647
+
648
+ return_codebook_pixels = (
649
+ return_codebook_pixels if return_codebook_pixels is not None else self.return_codebook_pixels
650
+ )
651
+ codebook_do_resize = codebook_do_resize if codebook_do_resize is not None else self.codebook_do_resize
652
+ codebook_size = codebook_size if codebook_size is not None else self.codebook_size
653
+ codebook_size = get_size_dict(codebook_size, param_name="codebook_size")
654
+ codebook_resample = codebook_resample if codebook_resample is not None else self.codebook_resample
655
+ codebook_do_rescale = codebook_do_rescale if codebook_do_rescale is not None else self.codebook_do_rescale
656
+ codebook_rescale_factor = (
657
+ codebook_rescale_factor if codebook_rescale_factor is not None else self.codebook_rescale_factor
658
+ )
659
+ codebook_do_center_crop = (
660
+ codebook_do_center_crop if codebook_do_center_crop is not None else self.codebook_do_center_crop
661
+ )
662
+ codebook_crop_size = codebook_crop_size if codebook_crop_size is not None else self.codebook_crop_size
663
+ codebook_crop_size = get_size_dict(codebook_crop_size, param_name="codebook_crop_size")
664
+ codebook_do_map_pixels = (
665
+ codebook_do_map_pixels if codebook_do_map_pixels is not None else self.codebook_do_map_pixels
666
+ )
667
+ codebook_do_normalize = (
668
+ codebook_do_normalize if codebook_do_normalize is not None else self.codebook_do_normalize
669
+ )
670
+ codebook_image_mean = codebook_image_mean if codebook_image_mean is not None else self.codebook_image_mean
671
+ codebook_image_std = codebook_image_std if codebook_image_std is not None else self.codebook_image_std
672
+
673
+ images = make_list_of_images(images)
674
+
675
+ validate_kwargs(captured_kwargs=kwargs.keys(), valid_processor_keys=self._valid_processor_keys)
676
+
677
+ if not valid_images(images):
678
+ raise ValueError(
679
+ "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
680
+ "torch.Tensor, tf.Tensor or jax.ndarray."
681
+ )
682
+
683
+ processed_images = [
684
+ self._preprocess_image(
685
+ image=img,
686
+ do_resize=do_resize,
687
+ size=size,
688
+ resample=resample,
689
+ do_center_crop=do_center_crop,
690
+ crop_size=crop_size,
691
+ do_rescale=do_rescale,
692
+ rescale_factor=rescale_factor,
693
+ do_normalize=do_normalize,
694
+ image_mean=image_mean,
695
+ image_std=image_std,
696
+ do_map_pixels=False,
697
+ data_format=data_format,
698
+ input_data_format=input_data_format,
699
+ )
700
+ for img in images
701
+ ]
702
+ data = {"pixel_values": processed_images}
703
+
704
+ if return_codebook_pixels:
705
+ codebook_images = [
706
+ self._preprocess_image(
707
+ image=img,
708
+ do_resize=codebook_do_resize,
709
+ size=codebook_size,
710
+ resample=codebook_resample,
711
+ do_center_crop=codebook_do_center_crop,
712
+ crop_size=codebook_crop_size,
713
+ do_rescale=codebook_do_rescale,
714
+ rescale_factor=codebook_rescale_factor,
715
+ do_normalize=codebook_do_normalize,
716
+ image_mean=codebook_image_mean,
717
+ image_std=codebook_image_std,
718
+ do_map_pixels=codebook_do_map_pixels,
719
+ data_format=data_format,
720
+ input_data_format=input_data_format,
721
+ )
722
+ for img in images
723
+ ]
724
+ data["codebook_pixel_values"] = codebook_images
725
+
726
+ if return_image_mask:
727
+ mask_generator = self.masking_generator(
728
+ input_size_patches=input_size_patches,
729
+ total_mask_patches=total_mask_patches,
730
+ mask_group_min_patches=mask_group_min_patches,
731
+ mask_group_max_patches=mask_group_max_patches,
732
+ mask_group_min_aspect_ratio=mask_group_min_aspect_ratio,
733
+ mask_group_max_aspect_ratio=mask_group_max_aspect_ratio,
734
+ )
735
+ masks = [mask_generator() for _ in images]
736
+ data["bool_masked_pos"] = masks
737
+
738
+ return BatchFeature(data=data, tensor_type=return_tensors)
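As an illustrative aside, here is a minimal usage sketch of the `preprocess` path shown above, driven through the processor's `__call__`. The `facebook/flava-full` checkpoint name, the dummy image, and the printed shape are assumptions; the returned keys (`pixel_values`, `codebook_pixel_values`, `bool_masked_pos`) follow from the code above.

    import numpy as np
    from PIL import Image
    from transformers import FlavaImageProcessor

    processor = FlavaImageProcessor.from_pretrained("facebook/flava-full")

    # Dummy RGB image with values in [0, 255], so the default do_rescale stays meaningful.
    image = Image.fromarray(np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8))

    outputs = processor(
        images=image,
        return_image_mask=True,       # adds "bool_masked_pos" via the masking generator
        return_codebook_pixels=True,  # adds "codebook_pixel_values" (map_pixels branch)
        return_tensors="pt",
    )
    print(sorted(outputs.keys()))          # bool_masked_pos, codebook_pixel_values, pixel_values
    print(outputs["pixel_values"].shape)   # e.g. (1, 3, 224, 224) with the default crop size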
venv/lib/python3.10/site-packages/transformers/models/flava/modeling_flava.py ADDED
@@ -0,0 +1,2098 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 Meta Platforms authors and The HuggingFace Team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch FLAVA model."""
16
+
17
+ import collections
18
+ import math
19
+ from collections import OrderedDict
20
+ from dataclasses import dataclass
21
+ from typing import Any, Dict, List, Optional, Set, Tuple, Union
22
+
23
+ import torch
24
+ import torch.utils.checkpoint
25
+ from torch import nn
26
+
27
+ from ...activations import ACT2FN
28
+ from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling
29
+ from ...modeling_utils import PreTrainedModel, find_pruneable_heads_and_indices, prune_linear_layer
30
+ from ...utils import (
31
+ ModelOutput,
32
+ add_code_sample_docstrings,
33
+ add_start_docstrings,
34
+ add_start_docstrings_to_model_forward,
35
+ logging,
36
+ replace_return_docstrings,
37
+ )
38
+ from .configuration_flava import (
39
+ FlavaConfig,
40
+ FlavaImageCodebookConfig,
41
+ FlavaImageConfig,
42
+ FlavaMultimodalConfig,
43
+ FlavaTextConfig,
44
+ )
45
+
46
+
47
+ logger = logging.get_logger(__name__)
48
+
49
+ _CHECKPOINT_FOR_DOC = "facebook/flava-full"
50
+
51
+ # Codebook docstring
52
+ _CHECKPOINT_FOR_CODEBOOK_DOC = "facebook/flava-image-codebook"
53
+ _CONFIG_CLASS_FOR_IMAGE_MODEL_DOC = "FlavaImageConfig"
54
+ _CONFIG_CLASS_FOR_TEXT_MODEL_DOC = "FlavaTextConfig"
55
+ _CONFIG_CLASS_FOR_MULTIMODAL_MODEL_DOC = "FlavaMultimodalConfig"
56
+ _EXPECTED_IMAGE_OUTPUT_SHAPE = [1, 197, 768]
57
+
58
+ from ..deprecated._archive_maps import FLAVA_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
59
+
60
+
61
+ FLAVA_CODEBOOK_PRETRAINED_MODEL_ARCHIVE_LIST = ["facebook/flava-image-codebook"]
62
+ LOGIT_SCALE_CLAMP_MIN = 0
63
+ LOGIT_SCALE_CLAMP_MAX = 4.6052
64
+
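A brief, assumed aside: constants like these are typically used to clamp the learned `logit_scale` before exponentiation, CLIP-style, so the contrastive temperature stays bounded (exp(4.6052) is roughly 100). The clamping call itself is not shown in this excerpt; this is only a sketch of the pattern.

    import torch

    LOGIT_SCALE_CLAMP_MIN = 0
    LOGIT_SCALE_CLAMP_MAX = 4.6052  # log(100) ~= 4.6052

    logit_scale = torch.nn.Parameter(torch.tensor(5.3))   # hypothetical learned parameter
    clamped = torch.clamp(logit_scale, LOGIT_SCALE_CLAMP_MIN, LOGIT_SCALE_CLAMP_MAX)
    temperature = clamped.exp()                            # bounded to at most ~100
    print(float(temperature))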
65
+ FlavaPossibleConfigs = Union[FlavaTextConfig, FlavaImageConfig, FlavaMultimodalConfig]
66
+
67
+
68
+ @dataclass
69
+ class FlavaModelOutput(ModelOutput):
70
+ """
71
+ Output from FlavaModel containing embeddings and outputs from individual encoders.
72
+
73
+ Note that `image_embeddings` and `text_embeddings` returned are similar to pooled output returned from a
74
+ transformer. If you want embeddings for contrastive loss or retrieval use a FLAVA model's `image_projection` and
75
+ `text_projection` layers on `image_embeddings` and `text_embeddings` respectively.
76
+
77
+ Args:
78
+ image_embeddings (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when `pixel_values` are present):
79
+ The image embeddings which are basically the pooled output of [`FlavaImageModel`].
80
+ image_output (`BaseModelOutputWithPooling`, *optional*, returned when `pixel_values` are present):
81
+ The output of the [`FlavaImageModel`].
82
+ text_embeddings (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when `input_ids` are present):
83
+ The text embeddings which are basically the pooled output of [`FlavaTextModel`].
84
+ text_output (`BaseModelOutputWithPooling`, *optional*, returned when `input_ids` are present):
85
+ The output of the [`FlavaTextModel`].
86
+ multimodal_embeddings (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when `input_ids` and `pixel_values` are present and `skip_multimodal_encoder` is `None` or `False`):
87
+ The multimodal embeddings which are basically the pooled output of [`FlavaMultimodalModel`].
88
+ multimodal_output (`BaseModelOutputWithPooling`, returned when `input_ids` and `pixel_values` are present and `skip_multimodal_encoder` is `None` or `False`):
89
+ The output of the [`FlavaMultimodalModel`].
90
+ """
91
+
92
+ image_embeddings: Optional[torch.FloatTensor] = None
93
+ image_output: Optional[BaseModelOutputWithPooling] = None
94
+ text_embeddings: Optional[torch.FloatTensor] = None
95
+ text_output: Optional[BaseModelOutputWithPooling] = None
96
+ multimodal_embeddings: Optional[torch.FloatTensor] = None
97
+ multimodal_output: Optional[BaseModelOutputWithPooling] = None
98
+
99
+ def to_tuple(self) -> Tuple[Any]:
100
+ return tuple(
101
+ self[k] if k not in ["text_output", "image_output", "multimodal_output"] else getattr(self, k).to_tuple()
102
+ for k in self.keys()
103
+ )
104
+
105
+
106
+ @dataclass
107
+ class FlavaLosses(ModelOutput):
108
+ """Class representing pretraining losses from FLAVA model
109
+
110
+ Args:
111
+ mim (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `mim_labels` and `pixel_values` are present, `input_ids_masked` is absent and `mim_weight` > 0.:
112
+ Masked Image Modeling loss as used in BeIT calculated only for unimodal image data.
113
+ mlm (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `mlm_labels` and `input_ids_masked` are present, `pixel_values` is absent and `mlm_weight` > 0.:
114
+ Masked Language Modeling loss as used in BERT calculated only for unimodal text data.
115
+ itm (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `itm_labels`, `input_ids_masked`, `pixel_values` are present and `itm_weight` > 0.:
116
+ Image Text Matching (ITM) loss calculated for paired image-text data. Note that ITM loss is calculated on
117
+ masked pairs in FLAVA.
118
+ global_contrastive (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `input_ids` and `pixel_values` are present and `global_contrastive_weight` > 0.:
119
+ Contrastive loss for image-text similarity similar to CLIP but calculated globally for paired image-text
120
+ data. This is calculated on unmasked images and texts.
121
+ mmm_image (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `mim_labels`, `pixel_values` and `input_ids_masked` are present and `mmm_image_weight` > 0.:
122
+ Masked Multimodal Modeling loss's image component calculated on paired image-text data.
123
+ mmm_text (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `mlm_labels`, `pixel_values` and `input_ids_masked` are present and `mmm_text_weight` > 0.:
124
+ Masked Multimodal Modeling loss's text component calculated on paired image-text data.
125
+ """
126
+
127
+ mim: Optional[torch.FloatTensor] = None
128
+ mlm: Optional[torch.FloatTensor] = None
129
+ itm: Optional[torch.FloatTensor] = None
130
+ global_contrastive: Optional[torch.FloatTensor] = None
131
+ mmm_image: Optional[torch.FloatTensor] = None
132
+ mmm_text: Optional[torch.FloatTensor] = None
133
+
134
+ def all_none(self) -> bool:
135
+ all_none = True
136
+ for v in self.values():
137
+ if v is not None:
138
+ all_none = False
139
+ break
140
+ return all_none
141
+
142
+
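A small sketch, under assumptions, of how a caller might consume `FlavaLosses`: only the fields and `all_none()` defined above are relied on, and the plain unweighted sum is illustrative rather than FLAVA's actual loss recipe.

    import torch
    from transformers.models.flava.modeling_flava import FlavaLosses

    losses = FlavaLosses(mim=torch.tensor(1.2), mlm=torch.tensor(0.8))  # other components stay None
    if not losses.all_none():
        # Aggregate whatever loss components were actually computed.
        total_loss = sum(v for v in losses.values() if v is not None)
        print(float(total_loss))  # ~2.0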
143
+ @dataclass
144
+ class FlavaForPreTrainingOutput(ModelOutput):
145
+ """
146
+ Output from FlavaForPreTraining containing embeddings, and outputs from individual encoders.
147
+
148
+ Note that `image_embeddings` and `text_embeddings` returned are similar to pooled output returned from a
149
+ transformer. If you want embeddings for contrastive loss or retrieval use a FLAVA model's `image_projection` and
150
+ `text_projection` layers on `image_embeddings` and `text_embeddings` respectively.
151
+
152
+ Args:
153
+ loss (`torch.FloatTensor`, *optional*, returned when `return_loss` is True):
154
+ Total loss calculated for this model.
155
+ loss_info (`FlavaLosses`):
156
+ Detailed info for FLAVA Pretraining losses. Check `FlavaLosses` class description for the information on
157
+ the keys.
158
+ image_embeddings (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when `pixel_values` are present):
159
+ The image embeddings which are basically the pooled output of [`FlavaImageModel`].
160
+ image_output (`BaseModelOutputWithPooling`, *optional*, returned when `pixel_values` are present):
161
+ The output of the [`FlavaImageModel`].
162
+ text_embeddings (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when `input_ids` are present):
163
+ The text embeddings which are basically the pooled output of [`FlavaTextModel`].
164
+ text_output (`BaseModelOutputWithPooling`, *optional*, returned when `input_ids` are present):
165
+ The output of the [`FlavaTextModel`].
166
+ multimodal_embeddings (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when `input_ids` and `pixel_values` are present and `skip_unmasked_multimodal_encoder` is `None` or `False`):
167
+ The multimodal embeddings which are basically the pooled output of [`FlavaMultimodalModel`].
168
+ multimodal_output (`BaseModelOutputWithPooling`, returned when `input_ids` and `pixel_values` are present and `skip_unmasked_multimodal_encoder` is `None` or `False`):
169
+ The output of the [`FlavaMultimodalModel`].
170
+
171
+ image_masked_embeddings (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when `pixel_values` are present):
172
+ The image embeddings which are basically the pooled output of [`FlavaImageModel`]. Uses `bool_masked_pos`
173
+ to create masked images.
174
+ image_masked_output (`BaseModelOutputWithPooling`, *optional*, returned when `pixel_values` are present):
175
+ The output of the [`FlavaImageModel`]. Uses `bool_masked_pos` to create masked images.
176
+ text_masked_embeddings (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when `input_ids_masked` are present):
177
+ The text embeddings which are basically the pooled output of [`FlavaTextModel`].
178
+ text_masked_output (`BaseModelOutputWithPooling`, *optional*, returned when `input_ids_masked` are present):
179
+ The output of the [`FlavaTextModel`].
180
+ multimodal_masked_embeddings (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when `input_ids` and `pixel_values` are present):
181
+ The multimodal embeddings which are basically the pooled output of [`FlavaMultimodalModel`].
182
+ multimodal_masked_output (`BaseModelOutputWithPooling`, returned when `input_ids_masked` and `pixel_values` are present):
183
+ The output of the [`FlavaMultimodalModel`].
184
+
185
+ mim_logits (`torch.FloatTensor` of shape `(batch_size, num_image_patches, image_vocab_size)` or of shape `(total_masked_patches, image_vocab_size)` , *optional*, returned when `pixel_values` are present and `input_ids_masked` are not):
186
+ The logits for MIM unimodal loss. Uses `bool_masked_pos` to get masked patches. The flattened output is
187
+ returned when `bool_masked_pos` has some of the patches masked.
188
+ mlm_logits (`torch.FloatTensor` of shape `(batch_size, text_seq_length, text_vocab_size)` or of shape `(total_masked_seq_length, text_vocab_size)`, *optional*, returned when `input_ids_masked` are present and `pixel_values` are not):
189
+ The logits for MLM unimodal loss. The flattened output is returned when `input_ids_masked` has some of
190
+ the tokens masked.
191
+ itm_logits (`torch.FloatTensor` of shape `(batch_size, 2)`, *optional*, returned when `input_ids_masked` and `pixel_values` are present):
192
+ The logits for ITM loss. Note that ITM loss is calculated on masked pairs in FLAVA.
193
+ mmm_image_logits (`torch.FloatTensor` of shape `(batch_size, num_image_patches, image_vocab_size)` or of shape`(total_masked_patches, image_vocab_size)`, *optional*, returned when `pixel_values` and `input_ids_masked` are present):
194
+ The logits for MMM image multimodal loss. Uses `bool_masked_pos` to get masked patches. The flattened
195
+ output is returned when `bool_masked_pos` has some of the patches masked.
196
+ mmm_text_logits (`torch.FloatTensor` of shape `(batch_size, text_seq_length, text_vocab_size)` or of shape `(total_masked_seq_length, text_vocab_size)`, *optional*, returned when `pixel_values` and `input_ids_masked` are present):
197
+ The logits for MMM text multimodal loss. The flattened output is returned when `input_ids_masked` has
198
+ some of the tokens masked.
199
+ contrastive_logits_per_image (`torch.FloatTensor` of shape `(image_batch_size, text_batch_size)`):
200
+ The scaled dot product scores between `image_embeddings` and `text_embeddings` but passed through FLAVA's
201
+ `image_projection` and `text_projection` layers respectively. This represents the image-text similarity
202
+ scores. This is calculated on unmasked images and texts.
203
+ contrastive_logits_per_text (`torch.FloatTensor` of shape `(text_batch_size, image_batch_size)`):
204
+ The scaled dot product scores between `text_embeddings` and `image_embeddings` but passed through FLAVA's
205
+ `text_projection` and `image_projection` layers respectively. This is calculated on unmasked images and
206
+ texts.
207
+ """
208
+
209
+ loss: Optional[torch.FloatTensor] = None
210
+ loss_info: FlavaLosses = None
211
+ image_embeddings: Optional[torch.FloatTensor] = None
212
+ image_output: Optional[BaseModelOutputWithPooling] = None
213
+ text_embeddings: Optional[torch.FloatTensor] = None
214
+ text_output: Optional[BaseModelOutputWithPooling] = None
215
+ multimodal_embeddings: Optional[torch.FloatTensor] = None
216
+ multimodal_output: Optional[BaseModelOutputWithPooling] = None
217
+ image_masked_embeddings: Optional[torch.FloatTensor] = None
218
+ image_masked_output: Optional[BaseModelOutputWithPooling] = None
219
+ text_masked_embeddings: Optional[torch.FloatTensor] = None
220
+ text_masked_output: Optional[BaseModelOutputWithPooling] = None
221
+ multimodal_masked_embeddings: Optional[torch.FloatTensor] = None
222
+ multimodal_masked_output: Optional[BaseModelOutputWithPooling] = None
223
+ mim_logits: Optional[torch.FloatTensor] = None
224
+ mlm_logits: Optional[torch.FloatTensor] = None
225
+ itm_logits: Optional[torch.FloatTensor] = None
226
+ contrastive_logits_per_image: Optional[torch.FloatTensor] = None
227
+ contrastive_logits_per_text: Optional[torch.FloatTensor] = None
228
+ mmm_image_logits: Optional[torch.FloatTensor] = None
229
+ mmm_text_logits: Optional[torch.FloatTensor] = None
230
+
231
+ def to_tuple(self) -> Tuple[Any]:
232
+ transformer_outputs = [
233
+ "text_output",
234
+ "image_output",
235
+ "multimodal_output",
236
+ "text_masked_output",
237
+ "image_masked_output",
238
+ "multimodal_masked_output",
239
+ ]
240
+ return tuple(self[k] if k not in transformer_outputs else getattr(self, k).to_tuple() for k in self.keys())
241
+
242
+
243
+ # Based on timm implementation, which can be found here:
244
+ # https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/image_transformer.py
245
+ class FlavaImageEmbeddings(nn.Module):
246
+ """
247
+ Construct the CLS token, position and patch embeddings. Optionally, also the mask token.
248
+ """
249
+
250
+ def __init__(self, config: FlavaImageConfig, use_mask_token: bool = False) -> None:
251
+ super().__init__()
252
+
253
+ use_mask_token = use_mask_token or config.mask_token
254
+ self.cls_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size))
255
+ self.mask_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size)) if use_mask_token else None
256
+ self.patch_embeddings = PatchEmbeddings(
257
+ image_size=config.image_size,
258
+ patch_size=config.patch_size,
259
+ num_channels=config.num_channels,
260
+ embed_dim=config.hidden_size,
261
+ )
262
+ num_patches = self.patch_embeddings.num_patches
263
+ self.position_embeddings = nn.Parameter(torch.zeros(1, num_patches + 1, config.hidden_size))
264
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
265
+ self.config = config
266
+
267
+ def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor:
268
+ """
269
+ This method interpolates the pre-trained position encodings so that the model can be used on higher
270
+ resolution images.
271
+
272
+ Source:
273
+ https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/image_transformer.py#L174
274
+ """
275
+
276
+ npatch = embeddings.shape[1] - 1
277
+ num_pos = self.position_embeddings.shape[1] - 1
278
+ if npatch == num_pos and height == width:
279
+ return self.position_embeddings
280
+ class_pos_embed = self.position_embeddings[:, 0]
281
+ patch_pos_embed = self.position_embeddings[:, 1:]
282
+ dim = embeddings.shape[-1]
283
+ num_h_patches = height // self.config.patch_size
284
+ num_w_patches = width // self.config.patch_size
285
+ # we add a small number to avoid floating point error in the interpolation
286
+ # see discussion at https://github.com/facebookresearch/dino/issues/8
287
+ num_h_patches, num_w_patches = num_h_patches + 0.1, num_w_patches + 0.1
288
+ patch_pos_embed = nn.functional.interpolate(
289
+ patch_pos_embed.reshape(1, int(math.sqrt(num_pos)), int(math.sqrt(num_pos)), dim).permute(0, 3, 1, 2),
290
+ scale_factor=(num_h_patches / math.sqrt(num_pos), num_w_patches / math.sqrt(num_pos)),
291
+ mode="bicubic",
292
+ align_corners=False,
293
+ )
294
+ if int(num_h_patches) != patch_pos_embed.shape[-2] or int(num_w_patches) != patch_pos_embed.shape[-1]:
295
+ raise ValueError(
296
+ f"Number of patches for images ({int(num_h_patches), int(num_w_patches)}) don't match the "
297
+ f"shape of position embedding ({patch_pos_embed.shape[-2], patch_pos_embed.shape[-1]})"
298
+ )
299
+ patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
300
+ return torch.cat((class_pos_embed.unsqueeze(0), patch_pos_embed), dim=1)
301
+
302
+ def forward(
303
+ self,
304
+ pixel_values: torch.Tensor,
305
+ bool_masked_pos: Optional[torch.BoolTensor] = None,
306
+ interpolate_pos_encoding: bool = False,
307
+ ) -> torch.Tensor:
308
+ batch_size, num_channels, height, width = pixel_values.shape
309
+ embeddings = self.patch_embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding)
310
+
311
+ batch_size, seq_len, _ = embeddings.size()
312
+ if bool_masked_pos is not None:
313
+ mask_tokens = self.mask_token.expand(batch_size, seq_len, -1)
314
+ # flatten bool_masked_pos from (B, H, W) to (B, H*W)
315
+ if bool_masked_pos.dim() == 3:
316
+ bool_masked_pos = bool_masked_pos.view(bool_masked_pos.size(0), -1)
317
+ # replace the masked visual tokens by mask_tokens
318
+ mask = bool_masked_pos.unsqueeze(-1).type_as(mask_tokens)
319
+ embeddings = embeddings * (1.0 - mask) + mask_tokens * mask
320
+
321
+ # add the [CLS] token to the embedded patch tokens
322
+ cls_tokens = self.cls_token.expand(batch_size, -1, -1)
323
+ embeddings = torch.cat((cls_tokens, embeddings), dim=1)
324
+
325
+ # add positional encoding to each token
326
+ if interpolate_pos_encoding:
327
+ embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width)
328
+ else:
329
+ embeddings = embeddings + self.position_embeddings
330
+
331
+ embeddings = self.dropout(embeddings)
332
+
333
+ return embeddings
334
+
335
+
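A standalone sketch (shapes assumed) of the mask-token substitution performed in `FlavaImageEmbeddings.forward` above: patch embeddings at masked positions are swapped for a learned mask token.

    import torch

    batch_size, seq_len, hidden_size = 2, 196, 768
    embeddings = torch.randn(batch_size, seq_len, hidden_size)      # patch embeddings
    mask_token = torch.zeros(1, 1, hidden_size)                     # stands in for self.mask_token
    bool_masked_pos = torch.rand(batch_size, seq_len) > 0.6         # True where a patch is masked

    mask_tokens = mask_token.expand(batch_size, seq_len, -1)
    mask = bool_masked_pos.unsqueeze(-1).type_as(mask_tokens)       # (B, L, 1); 1.0 on masked patches
    embeddings = embeddings * (1.0 - mask) + mask_tokens * mask
    assert embeddings.shape == (batch_size, seq_len, hidden_size)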
336
+ # Based on timm implementation, which can be found here:
337
+ # https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/image_transformer.py
338
+ class PatchEmbeddings(nn.Module):
339
+ """
340
+ Image to Patch Embedding.
341
+ """
342
+
343
+ def __init__(
344
+ self,
345
+ image_size: int = 224,
346
+ patch_size: Union[int, Tuple[int, int]] = 16,
347
+ num_channels: int = 3,
348
+ embed_dim: int = 768,
349
+ ):
350
+ super().__init__()
351
+ if not isinstance(image_size, collections.abc.Iterable):
352
+ image_size = (image_size, image_size)
353
+ if not isinstance(patch_size, collections.abc.Iterable):
354
+ patch_size = (patch_size, patch_size)
355
+ num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
356
+ self.image_size = image_size
357
+ self.patch_size = patch_size
358
+ self.num_patches = num_patches
359
+
360
+ self.projection = nn.Conv2d(num_channels, embed_dim, kernel_size=patch_size, stride=patch_size)
361
+
362
+ def forward(self, pixel_values: torch.Tensor, interpolate_pos_encoding: bool = False) -> torch.Tensor:
363
+ batch_size, num_channels, height, width = pixel_values.shape
364
+ if not interpolate_pos_encoding:
365
+ if height != self.image_size[0] or width != self.image_size[1]:
366
+ raise ValueError(
367
+ f"Input image size ({height}*{width}) doesn't match model"
368
+ f" ({self.image_size[0]}*{self.image_size[1]})."
369
+ )
370
+ x = self.projection(pixel_values).flatten(2).transpose(1, 2)
371
+ return x
372
+
373
+
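To make the shape bookkeeping in `PatchEmbeddings` concrete, a small sketch using the default 224x224 image and 16x16 patches (the values are assumed from the defaults above):

    import torch
    from torch import nn

    image_size, patch_size, num_channels, embed_dim = 224, 16, 3, 768
    num_patches = (image_size // patch_size) ** 2                   # 14 * 14 = 196
    projection = nn.Conv2d(num_channels, embed_dim, kernel_size=patch_size, stride=patch_size)

    pixel_values = torch.randn(1, num_channels, image_size, image_size)
    patches = projection(pixel_values)                              # (1, 768, 14, 14)
    embeddings = patches.flatten(2).transpose(1, 2)                 # (1, 196, 768)
    assert embeddings.shape == (1, num_patches, embed_dim)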
374
+ class FlavaTextEmbeddings(nn.Module):
375
+ """Construct the embeddings from word, position and token_type embeddings."""
376
+
377
+ def __init__(self, config):
378
+ super().__init__()
379
+ self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
380
+ self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
381
+ self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
382
+
383
+ # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
384
+ # any TensorFlow checkpoint file
385
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
386
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
387
+ # position_ids (1, len position emb) is contiguous in memory and exported when serialized
388
+ self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
389
+ self.register_buffer(
390
+ "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
391
+ )
392
+ self.register_buffer(
393
+ "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False
394
+ )
395
+
396
+ def forward(
397
+ self,
398
+ input_ids: Optional[torch.Tensor] = None,
399
+ token_type_ids: Optional[torch.Tensor] = None,
400
+ position_ids: Optional[torch.Tensor] = None,
401
+ ):
402
+ input_shape = input_ids.size()
403
+ seq_length = input_shape[1]
404
+
405
+ if position_ids is None:
406
+ position_ids = self.position_ids[:, :seq_length]
407
+
408
+ # If token_type_ids is not provided, fall back to the all-zeros buffer registered in the constructor. This usually
409
+ # happens when the ids are auto-generated; the registered buffer lets users trace the model without passing
410
+ # token_type_ids and solves issue #5664.
411
+ if token_type_ids is None:
412
+ if hasattr(self, "token_type_ids"):
413
+ buffered_token_type_ids = self.token_type_ids[:, :seq_length]
414
+ buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length)
415
+ token_type_ids = buffered_token_type_ids_expanded
416
+ else:
417
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
418
+
419
+ inputs_embeds = self.word_embeddings(input_ids)
420
+ token_type_embeddings = self.token_type_embeddings(token_type_ids)
421
+
422
+ embeddings = inputs_embeds + token_type_embeddings
423
+ if self.position_embedding_type == "absolute":
424
+ position_embeddings = self.position_embeddings(position_ids)
425
+ embeddings += position_embeddings
426
+ embeddings = self.LayerNorm(embeddings)
427
+ embeddings = self.dropout(embeddings)
428
+ return embeddings
429
+
430
+
431
+ class FlavaSelfAttention(nn.Module):
432
+ def __init__(self, config: FlavaPossibleConfigs) -> None:
433
+ super().__init__()
434
+ if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
435
+ raise ValueError(
436
+ f"The hidden size {config.hidden_size} is not a multiple of the number of attention "
437
+ f"heads {config.num_attention_heads}."
438
+ )
439
+
440
+ self.num_attention_heads = config.num_attention_heads
441
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
442
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
443
+
444
+ self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
445
+ self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
446
+ self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
447
+
448
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
449
+
450
+ def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor:
451
+ new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
452
+ x = x.view(*new_x_shape)
453
+ return x.permute(0, 2, 1, 3)
454
+
455
+ def forward(
456
+ self,
457
+ hidden_states: torch.Tensor,
458
+ attention_mask: Optional[torch.Tensor] = None,
459
+ head_mask: Optional[torch.Tensor] = None,
460
+ output_attentions: bool = False,
461
+ ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]:
462
+ mixed_query_layer = self.query(hidden_states)
463
+
464
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
465
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
466
+ query_layer = self.transpose_for_scores(mixed_query_layer)
467
+
468
+ # Take the dot product between "query" and "key" to get the raw attention scores.
469
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
470
+
471
+ attention_scores = attention_scores / math.sqrt(self.attention_head_size)
472
+ if attention_mask is not None:
473
+ # Apply the attention mask (precomputed for all layers in BertModel's forward() function)
474
+ attention_scores = attention_scores + attention_mask
475
+
476
+ # Normalize the attention scores to probabilities.
477
+ attention_probs = nn.functional.softmax(attention_scores, dim=-1)
478
+
481
+ # This is actually dropping out entire tokens to attend to, which might
482
+ # seem a bit unusual, but is taken from the original Transformer paper.
483
+ attention_probs = self.dropout(attention_probs)
484
+
485
+ # Mask heads if we want to
486
+ if head_mask is not None:
487
+ attention_probs = attention_probs * head_mask
488
+
489
+ context_layer = torch.matmul(attention_probs, value_layer)
490
+
491
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
492
+ new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
493
+ context_layer = context_layer.view(*new_context_layer_shape)
494
+
495
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
496
+
497
+ return outputs
498
+
499
+
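A shape-only sketch (dimensions assumed) of the head split done by `transpose_for_scores` and the scaled dot-product that follows in `FlavaSelfAttention`:

    import math
    import torch

    batch, seq_len, num_heads, head_size = 2, 197, 12, 64
    hidden = num_heads * head_size                                   # 768

    def transpose_for_scores(x):
        # (B, L, H*Dh) -> (B, H, L, Dh)
        return x.view(batch, seq_len, num_heads, head_size).permute(0, 2, 1, 3)

    q = transpose_for_scores(torch.randn(batch, seq_len, hidden))
    k = transpose_for_scores(torch.randn(batch, seq_len, hidden))
    scores = torch.matmul(q, k.transpose(-1, -2)) / math.sqrt(head_size)
    probs = torch.nn.functional.softmax(scores, dim=-1)              # (B, H, L, L)
    assert probs.shape == (batch, num_heads, seq_len, seq_len)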
500
+ class FlavaSelfOutput(nn.Module):
501
+ """
502
+ The residual connection is defined in FlavaLayer (same as ViTLayer) instead of here (as is the case with other
503
+ models), due to the layernorm applied before each block.
504
+ """
505
+
506
+ def __init__(self, config: FlavaPossibleConfigs) -> None:
507
+ super().__init__()
508
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
509
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
510
+
511
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
512
+ hidden_states = self.dense(hidden_states)
513
+ hidden_states = self.dropout(hidden_states)
514
+
515
+ return hidden_states
516
+
517
+
518
+ class FlavaAttention(nn.Module):
519
+ def __init__(self, config: FlavaPossibleConfigs) -> None:
520
+ super().__init__()
521
+ self.attention = FlavaSelfAttention(config)
522
+ self.output = FlavaSelfOutput(config)
523
+ self.pruned_heads = set()
524
+
525
+ def prune_heads(self, heads: Set[int]) -> None:
526
+ if len(heads) == 0:
527
+ return
528
+ heads, index = find_pruneable_heads_and_indices(
529
+ heads, self.attention.num_attention_heads, self.attention.attention_head_size, self.pruned_heads
530
+ )
531
+
532
+ # Prune linear layers
533
+ self.attention.query = prune_linear_layer(self.attention.query, index)
534
+ self.attention.key = prune_linear_layer(self.attention.key, index)
535
+ self.attention.value = prune_linear_layer(self.attention.value, index)
536
+ self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
537
+
538
+ # Update hyper params and store pruned heads
539
+ self.attention.num_attention_heads = self.attention.num_attention_heads - len(heads)
540
+ self.attention.all_head_size = self.attention.attention_head_size * self.attention.num_attention_heads
541
+ self.pruned_heads = self.pruned_heads.union(heads)
542
+
543
+ def forward(
544
+ self,
545
+ hidden_states: torch.Tensor,
546
+ attention_mask: Optional[torch.Tensor] = None,
547
+ head_mask: Optional[torch.Tensor] = None,
548
+ output_attentions: bool = False,
549
+ ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]:
550
+ self_outputs = self.attention(
551
+ hidden_states, attention_mask=attention_mask, head_mask=head_mask, output_attentions=output_attentions
552
+ )
553
+
554
+ attention_output = self.output(self_outputs[0], hidden_states)
555
+
556
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
557
+ return outputs
558
+
559
+
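A sketch of `prune_heads` on a freshly initialized attention block. The default `FlavaImageConfig` values (12 heads, hidden size 768, so a head size of 64) and the internal import path are assumptions based on this file.

    from transformers import FlavaImageConfig
    from transformers.models.flava.modeling_flava import FlavaAttention

    attn = FlavaAttention(FlavaImageConfig())
    attn.prune_heads({0, 1})                                  # drop two of the twelve heads

    print(attn.attention.num_attention_heads)                 # 10
    print(attn.attention.all_head_size)                       # 10 * 64 = 640
    print(attn.attention.query.weight.shape)                  # torch.Size([640, 768])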
560
+ class FlavaIntermediate(nn.Module):
561
+ def __init__(self, config: FlavaPossibleConfigs) -> None:
562
+ super().__init__()
563
+ self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
564
+ if isinstance(config.hidden_act, str):
565
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
566
+ else:
567
+ self.intermediate_act_fn = config.hidden_act
568
+
569
+ # Copied from transformers.models.vit.modeling_vit.ViTIntermediate.forward
570
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
571
+ hidden_states = self.dense(hidden_states)
572
+ hidden_states = self.intermediate_act_fn(hidden_states)
573
+
574
+ return hidden_states
575
+
576
+
577
+ class FlavaOutput(nn.Module):
578
+ def __init__(self, config: FlavaPossibleConfigs) -> None:
579
+ super().__init__()
580
+ self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
581
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
582
+
583
+ # Copied from transformers.models.vit.modeling_vit.ViTOutput.forward
584
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
585
+ hidden_states = self.dense(hidden_states)
586
+ hidden_states = self.dropout(hidden_states)
587
+
588
+ hidden_states = hidden_states + input_tensor
589
+
590
+ return hidden_states
591
+
592
+
593
+ class FlavaLayer(nn.Module):
594
+ """This corresponds to the Block class in the timm implementation."""
595
+
596
+ def __init__(self, config: FlavaPossibleConfigs) -> None:
597
+ super().__init__()
598
+ self.chunk_size_feed_forward = config.chunk_size_feed_forward
599
+ self.seq_len_dim = 1
600
+ self.attention = FlavaAttention(config)
601
+ self.intermediate = FlavaIntermediate(config)
602
+ self.output = FlavaOutput(config)
603
+
604
+ # TODO: Check fp32 layer norm possibility
605
+ self.layernorm_before = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
606
+ self.layernorm_after = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
607
+
608
+ def forward(
609
+ self,
610
+ hidden_states: torch.Tensor,
611
+ attention_mask: Optional[torch.Tensor] = None,
612
+ head_mask: Optional[torch.Tensor] = None,
613
+ output_attentions: bool = False,
614
+ ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]:
615
+ self_attention_outputs = self.attention(
616
+ self.layernorm_before(hidden_states), # in ViT, layernorm is applied before self-attention
617
+ attention_mask=attention_mask,
618
+ head_mask=head_mask,
619
+ output_attentions=output_attentions,
620
+ )
621
+ attention_output = self_attention_outputs[0]
622
+ outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
623
+
624
+ # first residual connection
625
+ hidden_states = attention_output + hidden_states
626
+
627
+ # in ViT, layernorm is also applied after self-attention
628
+ layer_output = self.layernorm_after(hidden_states)
629
+ layer_output = self.intermediate(layer_output)
630
+
631
+ # second residual connection is done here
632
+ layer_output = self.output(layer_output, hidden_states)
633
+
634
+ outputs = (layer_output,) + outputs
635
+
636
+ return outputs
637
+
638
+
639
+ class FlavaEncoder(nn.Module):
640
+ def __init__(self, config: FlavaConfig) -> None:
641
+ super().__init__()
642
+ self.config = config
643
+ self.layer = nn.ModuleList([FlavaLayer(config) for _ in range(config.num_hidden_layers)])
644
+ self.gradient_checkpointing = False
645
+
646
+ def forward(
647
+ self,
648
+ hidden_states: torch.Tensor,
649
+ attention_mask: Optional[torch.Tensor] = None,
650
+ head_mask: Optional[torch.Tensor] = None,
651
+ output_attentions: bool = False,
652
+ output_hidden_states: bool = False,
653
+ return_dict: bool = True,
654
+ ) -> Union[tuple, BaseModelOutput]:
655
+ all_hidden_states = () if output_hidden_states else None
656
+ all_self_attentions = () if output_attentions else None
657
+
658
+ for i, layer_module in enumerate(self.layer):
659
+ if output_hidden_states:
660
+ all_hidden_states = all_hidden_states + (hidden_states,)
661
+
662
+ layer_head_mask = head_mask[i] if head_mask is not None else None
663
+
664
+ if self.gradient_checkpointing and self.training:
665
+ layer_outputs = self._gradient_checkpointing_func(
666
+ layer_module.__call__,
667
+ hidden_states,
668
+ attention_mask,
669
+ layer_head_mask,
670
+ output_attentions,
671
+ )
672
+ else:
673
+ layer_outputs = layer_module(hidden_states, attention_mask, layer_head_mask, output_attentions)
674
+
675
+ hidden_states = layer_outputs[0]
676
+
677
+ if output_attentions:
678
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
679
+
680
+ if output_hidden_states:
681
+ all_hidden_states = all_hidden_states + (hidden_states,)
682
+
683
+ if not return_dict:
684
+ return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
685
+ return BaseModelOutput(
686
+ last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions
687
+ )
688
+
689
+
690
+ class FlavaPooler(nn.Module):
691
+ def __init__(self, config: FlavaPossibleConfigs):
692
+ super().__init__()
693
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
694
+ self.activation = nn.Tanh()
695
+
696
+ def forward(self, hidden_states: torch.Tensor):
697
+ # We "pool" the model by simply taking the hidden state corresponding
698
+ # to the first token.
699
+ first_token_tensor = hidden_states[:, 0]
700
+ pooled_output = self.dense(first_token_tensor)
701
+ pooled_output = self.activation(pooled_output)
702
+ return pooled_output
703
+
704
+
705
+ FLAVA_START_DOCSTRING = r"""
706
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
707
+ as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
708
+ behavior.
709
+
710
+ Parameters:
711
+ config ([`{config}`]): Model configuration class with all the parameters of the model.
712
+ Initializing with a config file does not load the weights associated with the model, only the
713
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
714
+ """
715
+
716
+ FLAVA_INPUTS_DOCSTRING_COMMON = r"""
717
+ attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
718
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
719
+ - 1 for tokens that are **not masked**,
720
+ - 0 for tokens that are **masked**.
721
+ [What are attention masks?](../glossary#attention-mask)
722
+
723
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
724
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
725
+
726
+ - 1 indicates the head is **not masked**,
727
+ - 0 indicates the head is **masked**.
728
+
729
+ output_attentions (`bool`, *optional*):
730
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
731
+ tensors for more detail.
732
+ output_hidden_states (`bool`, *optional*):
733
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
734
+ more detail.
735
+
736
+ return_dict (`bool`, *optional*):
737
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
738
+ """
739
+
740
+ FLAVA_IMAGE_INPUTS_DOCSTRING_BASE = r"""
741
+ Args:
742
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
743
+ Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
744
+ [`FlavaImageProcessor.__call__`] for details.
745
+
746
+ bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, image_num_patches)`):
747
+ Boolean masked positions. Indicates which patches are masked (1) and which aren't (0).
748
+
749
+ interpolate_pos_encoding (`bool`, *optional*):
750
+ Whether to interpolate the pre-trained position encodings.
751
+ """
752
+
753
+ FLAVA_IMAGE_INPUTS_DOCSTRING = FLAVA_IMAGE_INPUTS_DOCSTRING_BASE + FLAVA_INPUTS_DOCSTRING_COMMON
754
+
755
+ FLAVA_TEXT_INPUTS_DOCSTRING_BASE = r"""
756
+ Args:
757
+ input_ids (`torch.LongTensor` of shape `({0})`):
758
+ Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See
759
+ [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input
760
+ IDs?](../glossary#input-ids)
761
+
762
+ token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
763
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
764
+ 1]`:
765
+ - 0 corresponds to a *sentence A* token,
766
+ - 1 corresponds to a *sentence B* token.
767
+ [What are token type IDs?](../glossary#token-type-ids)
768
+ """
769
+
770
+ FLAVA_TEXT_INPUTS_DOCSTRING = FLAVA_TEXT_INPUTS_DOCSTRING_BASE + FLAVA_INPUTS_DOCSTRING_COMMON
771
+
772
+ FLAVA_MULTIMODAL_INPUTS_DOCSTRING = (
773
+ r"""
774
+ Args:
775
+ hidden_states (`torch.FloatTensor` of shape `(batch_size, image_num_patches + text_seq_len, hidden_size)`):
776
+ The concatenated hidden states of unimodal encoders.
777
+ """
778
+ + FLAVA_INPUTS_DOCSTRING_COMMON
779
+ )
780
+
781
+ FLAVA_MODEL_INPUTS_DOCSTRING_BASE = r"""
782
+ Args:
783
+ skip_multimodal_encoder (*bool*, *optional*):
784
+ Skip any calculations for multimodal encoder. Useful if multimodal encoding is not going to be used.
785
+ """
786
+
787
+ FLAVA_MODEL_INPUTS_DOCSTRING = (
788
+ FLAVA_IMAGE_INPUTS_DOCSTRING_BASE
789
+ + FLAVA_TEXT_INPUTS_DOCSTRING_BASE
790
+ + FLAVA_INPUTS_DOCSTRING_COMMON
791
+ + FLAVA_MODEL_INPUTS_DOCSTRING_BASE
792
+ )
793
+
794
+
795
+ FLAVA_PRETRAINING_INPUTS_DOCSTRING = (
796
+ r"""
797
+ Args:
798
+ input_ids_masked (`torch.LongTensor` of shape `({0})`):
799
+ Indices of input sequence tokens in the vocabulary. These are the masked versions of the original tokens,
800
+ to be used with MLM. Indices can be obtained using [`AutoTokenizer`] along with
801
+ [`DataCollatorForMaskedLanguageModeling`]. See [`PreTrainedTokenizer.encode`] and
802
+ [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids)
803
+
804
+ """
805
+ + FLAVA_TEXT_INPUTS_DOCSTRING_BASE
806
+ + FLAVA_IMAGE_INPUTS_DOCSTRING_BASE
807
+ + r"""
808
+ image_attention_mask (`torch.FloatTensor` of shape `({1})`, *optional*):
809
+ Mask to avoid performing attention on padding token indices specifically for images. Mask values selected
810
+ in `[0, 1]`:
811
+ - 1 for tokens that are **not masked**,
812
+ - 0 for tokens that are **masked**.
813
+ [What are attention masks?](../glossary#attention-mask)
814
+
815
+ skip_unmasked_multimodal_encoder (*bool*, *optional*):
816
+ Skip any calculations for multimodal encoder for unmasked inputs. FLAVA pretraining doesn't need unmasked
817
+ multimodal embeddings or outputs as of now.
818
+
819
+ mlm_labels (`torch.LongTensor` of shape `(batch_size, text_seq_len)`, *optional*):
820
+ Labels for computing the masked language modeling and multimodal masked modeling losses.
821
+ Indices should be in `[-100, 0, ..., text_config.vocab_size - 1]` (see `input_ids` docstring). Tokens with
822
+ indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0,
823
+ ..., text_config.vocab_size - 1]`.
824
+
825
+ mim_labels (`torch.LongTensor` of shape `(batch_size, image_num_patches)`, *optional*):
826
+ Labels for computing the image and multimodal masked modeling loss. Indices should be in `[-100, 0, ...,
827
+ image_config.vocab_size - 1]`. Tokens with indices set to `-100` are ignored (masked), the loss is only
828
+ computed for the tokens with labels in `[0, ..., image_config.vocab_size - 1]`. If not passed, they are
829
+ generated automatically using the image codebook assigned to the model. By default, it uses
830
+ [`FlavaImageCodebook`]. See [`FlavaImageCodebook`] to understand how to generate mim_labels.
831
+
832
+ itm_labels (`torch.LongTensor` of shape `(batch_size, 1)`, *optional*):
833
+ Labels for computing the image-text matching loss. 0 means the pairs don't match and 1 means they match.
834
+ The pairs with 0 will be skipped for calculation of MMM and global contrastive losses as well.
835
+
836
+ return_loss (`bool`, *optional*, defaults to `None`):
837
+ Whether to return the calculated loss or not.
838
+ """
839
+ + FLAVA_INPUTS_DOCSTRING_COMMON
840
+ )
841
+
842
+ FLAVA_PRETRAINING_START_DOCSTRING_EXTRA = r"""
843
+ Parameters:
844
+ image_codebook ([`nn.Module`]): If passed, the image codebook will be set to this. Otherwise, it will
845
+ be initialized using the image_codebook_config defined in the config.
846
+ """
847
+
848
+
849
+ class FlavaPreTrainedModel(PreTrainedModel):
850
+ """
851
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
852
+ models.
853
+ """
854
+
855
+ config_class = FlavaConfig
856
+ base_model_prefix = "flava"
857
+ supports_gradient_checkpointing = True
858
+
859
+ def _init_weights(self, module: Union[nn.Linear, nn.Conv2d, nn.LayerNorm]) -> None:
860
+ """Initialize the weights"""
861
+ if isinstance(module, (nn.Linear, nn.Conv2d)):
862
+ # Slightly different from the TF version which uses truncated_normal for initialization
863
+ # cf https://github.com/pytorch/pytorch/pull/5617
864
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
865
+ if module.bias is not None:
866
+ module.bias.data.zero_()
867
+ elif isinstance(module, nn.Embedding):
868
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
869
+ if module.padding_idx is not None:
870
+ module.weight.data[module.padding_idx].zero_()
871
+ elif isinstance(module, nn.LayerNorm):
872
+ module.bias.data.zero_()
873
+ module.weight.data.fill_(1.0)
874
+
875
+
876
+ @add_start_docstrings(
877
+ "The bare FLAVA Image Model transformer outputting raw hidden-states without any specific head on top.",
878
+ FLAVA_START_DOCSTRING.format(config="FlavaImageConfig"),
879
+ )
880
+ class FlavaImageModel(FlavaPreTrainedModel):
881
+ config_class = FlavaImageConfig
882
+ # This override allows us to load FlavaImageModel from FlavaModel/FlavaForPreTraining checkpoints.
883
+ base_model_prefix = "flava.image_model"
884
+ main_input_name = "pixel_values"
885
+
886
+ def __init__(self, config: FlavaImageConfig, add_pooling_layer: bool = True):
887
+ super().__init__(config)
888
+
889
+ self.config = config
890
+
891
+ self.embeddings = FlavaImageEmbeddings(config)
892
+ self.encoder = FlavaEncoder(config)
893
+
894
+ self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
895
+ self.pooler = FlavaPooler(config) if add_pooling_layer else None
896
+
897
+ self.post_init()
898
+
899
+ def get_input_embeddings(self) -> nn.Module:
900
+ return self.embeddings.patch_embeddings
901
+
902
+ def set_input_embeddings(self, value: nn.Module):
903
+ self.embeddings.patch_embeddings = value
904
+
905
+ def _prune_heads(self, heads_to_prune: Dict[int, List[int]]) -> None:
906
+ """
907
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
908
+ class PreTrainedModel
909
+ """
910
+ for layer, heads in heads_to_prune.items():
911
+ self.encoder.layer[layer].attention.prune_heads(heads)
912
+
913
+ @add_start_docstrings_to_model_forward(FLAVA_IMAGE_INPUTS_DOCSTRING.format("batch_size, image_num_patches"))
914
+ @add_code_sample_docstrings(
915
+ checkpoint=_CHECKPOINT_FOR_DOC,
916
+ output_type=BaseModelOutputWithPooling,
917
+ config_class=_CONFIG_CLASS_FOR_IMAGE_MODEL_DOC,
918
+ modality="vision",
919
+ expected_output=_EXPECTED_IMAGE_OUTPUT_SHAPE,
920
+ )
921
+ def forward(
922
+ self,
923
+ pixel_values: Optional[torch.Tensor] = None,
924
+ bool_masked_pos: Optional[torch.BoolTensor] = None,
925
+ interpolate_pos_encoding: Optional[bool] = None,
926
+ attention_mask: Optional[torch.Tensor] = None,
927
+ head_mask: Optional[torch.Tensor] = None,
928
+ output_attentions: Optional[bool] = None,
929
+ output_hidden_states: Optional[bool] = None,
930
+ return_dict: Optional[bool] = None,
931
+ ) -> Union[tuple, BaseModelOutputWithPooling]:
932
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
933
+ output_hidden_states = (
934
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
935
+ )
936
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
937
+
938
+ if pixel_values is None:
939
+ raise ValueError("You have to specify pixel_values")
940
+
941
+ # Prepare head mask if needed
942
+ # 1.0 in head_mask indicates we keep the head
943
+ # attention_probs has shape bsz x n_heads x N x N
944
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
945
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
946
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
947
+
948
+ embedding_output = self.embeddings(
949
+ pixel_values, bool_masked_pos=bool_masked_pos, interpolate_pos_encoding=interpolate_pos_encoding
950
+ )
951
+
952
+ encoder_outputs = self.encoder(
953
+ embedding_output,
954
+ attention_mask=attention_mask,
955
+ head_mask=head_mask,
956
+ output_attentions=output_attentions,
957
+ output_hidden_states=output_hidden_states,
958
+ return_dict=return_dict,
959
+ )
960
+ sequence_output = encoder_outputs[0]
961
+ sequence_output = self.layernorm(sequence_output)
962
+ pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
963
+
964
+ if not return_dict:
965
+ return (sequence_output, pooled_output) + encoder_outputs[1:]
966
+
967
+ return BaseModelOutputWithPooling(
968
+ last_hidden_state=sequence_output,
969
+ pooler_output=pooled_output,
970
+ hidden_states=encoder_outputs.hidden_states,
971
+ attentions=encoder_outputs.attentions,
972
+ )
973
+
974
+
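A standalone usage sketch for `FlavaImageModel`, not part of the committed file: it reuses the `facebook/flava-full` checkpoint and the COCO sample image already referenced in the docstrings above, and only illustrates the expected output shapes.

```python
# Hedged sketch: run the image tower on its own and inspect its hidden states.
from PIL import Image
import requests
from transformers import AutoImageProcessor, FlavaImageModel

model = FlavaImageModel.from_pretrained("facebook/flava-full")
image_processor = AutoImageProcessor.from_pretrained("facebook/flava-full")

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

inputs = image_processor(images=image, return_tensors="pt")
outputs = model(**inputs)
# last_hidden_state: (batch_size, image_num_patches + 1, hidden_size), e.g. (1, 197, 768)
print(outputs.last_hidden_state.shape)
# pooler_output: (batch_size, hidden_size), from the FlavaPooler applied to the [CLS] position
print(outputs.pooler_output.shape)
```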
975
+ @add_start_docstrings(
976
+ "The bare FLAVA Text Model transformer outputting raw hidden-states without any specific head on top.",
977
+ FLAVA_START_DOCSTRING.format(config="FlavaTextConfig"),
978
+ )
979
+ class FlavaTextModel(FlavaPreTrainedModel):
980
+ config_class = FlavaTextConfig
981
+ # This override allows us to load FlavaTextModel from FlavaModel/FlavaForPreTraining checkpoints.
982
+ base_model_prefix = "flava.text_model"
983
+
984
+ def __init__(self, config: FlavaTextConfig, add_pooling_layer: bool = True):
985
+ super().__init__(config)
986
+ self.config = config
987
+
988
+ self.embeddings = FlavaTextEmbeddings(config)
989
+ self.encoder = FlavaEncoder(config)
990
+
991
+ self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
992
+ self.pooler = FlavaPooler(config) if add_pooling_layer else None
993
+
994
+ self.post_init()
995
+
996
+ def get_input_embeddings(self) -> nn.Module:
997
+ return self.embeddings.word_embeddings
998
+
999
+ def set_input_embeddings(self, value: nn.Module):
1000
+ self.embeddings.word_embeddings = value
1001
+
1002
+ def _prune_heads(self, heads_to_prune: Dict[int, List[int]]) -> None:
1003
+ """
1004
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
1005
+ class PreTrainedModel
1006
+ """
1007
+ for layer, heads in heads_to_prune.items():
1008
+ self.encoder.layer[layer].attention.prune_heads(heads)
1009
+
1010
+ @add_start_docstrings_to_model_forward(FLAVA_TEXT_INPUTS_DOCSTRING.format("batch_size, text_seq_length"))
1011
+ @add_code_sample_docstrings(
1012
+ checkpoint=_CHECKPOINT_FOR_DOC,
1013
+ output_type=BaseModelOutputWithPooling,
1014
+ config_class=_CONFIG_CLASS_FOR_TEXT_MODEL_DOC,
1015
+ )
1016
+ def forward(
1017
+ self,
1018
+ input_ids: Optional[torch.Tensor] = None,
1019
+ attention_mask: Optional[torch.Tensor] = None,
1020
+ token_type_ids: Optional[torch.Tensor] = None,
1021
+ position_ids: Optional[torch.Tensor] = None,
1022
+ head_mask: Optional[torch.Tensor] = None,
1023
+ output_attentions: Optional[bool] = None,
1024
+ output_hidden_states: Optional[bool] = None,
1025
+ return_dict: Optional[bool] = None,
1026
+ ) -> Union[tuple, BaseModelOutputWithPooling]:
1027
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1028
+ output_hidden_states = (
1029
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1030
+ )
1031
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1032
+
1033
+ if input_ids is None:
1034
+ raise ValueError("You have to specify input_ids")
1035
+
1036
+ input_shape = input_ids.size()
1037
+
1038
+ if attention_mask is None:
1039
+ attention_mask = torch.ones(input_shape, device=input_ids.device)
1040
+
1041
+ # Prepare head mask if needed
1042
+ # 1.0 in head_mask indicates we keep the head
1043
+ # attention_probs has shape bsz x n_heads x N x N
1044
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
1045
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
1046
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
1047
+ extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(
1048
+ attention_mask, input_shape, input_ids.device
1049
+ )
1050
+
1051
+ embedding_output = self.embeddings(
1052
+ input_ids=input_ids,
1053
+ token_type_ids=token_type_ids,
1054
+ position_ids=position_ids,
1055
+ )
1056
+
1057
+ encoder_outputs = self.encoder(
1058
+ embedding_output,
1059
+ attention_mask=extended_attention_mask,
1060
+ head_mask=head_mask,
1061
+ output_attentions=output_attentions,
1062
+ output_hidden_states=output_hidden_states,
1063
+ return_dict=return_dict,
1064
+ )
1065
+ sequence_output = encoder_outputs[0]
1066
+ sequence_output = self.layernorm(sequence_output)
1067
+ pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
1068
+
1069
+ if not return_dict:
1070
+ return (sequence_output, pooled_output) + encoder_outputs[1:]
1071
+
1072
+ return BaseModelOutputWithPooling(
1073
+ last_hidden_state=sequence_output,
1074
+ pooler_output=pooled_output,
1075
+ hidden_states=encoder_outputs.hidden_states,
1076
+ attentions=encoder_outputs.attentions,
1077
+ )
1078
+
1079
+
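A matching sketch for `FlavaTextModel`, assuming the same checkpoint ships a BERT-style tokenizer loadable through `AutoTokenizer` (the processor class later in this commit wraps `BertTokenizerFast`):

```python
# Hedged sketch: encode a small batch of sentences with the text tower only.
from transformers import AutoTokenizer, FlavaTextModel

model = FlavaTextModel.from_pretrained("facebook/flava-full")
tokenizer = AutoTokenizer.from_pretrained("facebook/flava-full")  # assumed to resolve to the FLAVA tokenizer

inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt")
outputs = model(**inputs)
# last_hidden_state: (batch_size, text_seq_len, hidden_size)
print(outputs.last_hidden_state.shape)
```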
1080
+ @add_start_docstrings(
1081
+ "The bare FLAVA Multimodal Model transformer outputting raw hidden-states without any specific head on top.",
1082
+ FLAVA_START_DOCSTRING.format(config="FlavaMultimodalConfig"),
1083
+ )
1084
+ class FlavaMultimodalModel(FlavaPreTrainedModel):
1085
+ config_class = FlavaMultimodalConfig
1086
+ # This override allows us to load FlavaMultimodalModel from FlavaModel/FlavaForPreTraining checkpoints.
1087
+ base_model_prefix = "flava.multimodal_model"
1088
+ main_input_name = "hidden_states"
1089
+
1090
+ def __init__(self, config: FlavaMultimodalConfig, add_pooling_layer=True):
1091
+ super().__init__(config)
1092
+ self.config = config
1093
+ self.use_cls_token = self.config.use_cls_token
1094
+ if self.use_cls_token:
1095
+ self.cls_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size))
1096
+
1097
+ self.encoder = FlavaEncoder(config)
1098
+
1099
+ self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
1100
+ self.pooler = FlavaPooler(config) if add_pooling_layer else None
1101
+
1102
+ self.post_init()
1103
+
1104
+ def _prune_heads(self, heads_to_prune: Dict[int, List[int]]) -> None:
1105
+ """
1106
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
1107
+ class PreTrainedModel
1108
+ """
1109
+ for layer, heads in heads_to_prune.items():
1110
+ self.encoder.layer[layer].attention.prune_heads(heads)
1111
+
1112
+ @add_start_docstrings_to_model_forward(
1113
+ FLAVA_MULTIMODAL_INPUTS_DOCSTRING.format("batch_size, image_num_patches + text_seq_len")
1114
+ )
1115
+ @add_code_sample_docstrings(
1116
+ checkpoint=_CHECKPOINT_FOR_DOC,
1117
+ output_type=BaseModelOutputWithPooling,
1118
+ config_class=_CONFIG_CLASS_FOR_MULTIMODAL_MODEL_DOC,
1119
+ )
1120
+ def forward(
1121
+ self,
1122
+ hidden_states: torch.Tensor,
1123
+ attention_mask: Optional[torch.Tensor] = None,
1124
+ head_mask: Optional[torch.Tensor] = None,
1125
+ output_attentions: Optional[bool] = None,
1126
+ output_hidden_states: Optional[bool] = None,
1127
+ return_dict: Optional[bool] = None,
1128
+ ) -> Union[tuple, BaseModelOutputWithPooling]:
1129
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1130
+ output_hidden_states = (
1131
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1132
+ )
1133
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1134
+
1135
+ batch_size, seq_length, _ = hidden_states.size()
1136
+
1137
+ if self.use_cls_token:
1138
+ cls_tokens = self.cls_token.expand(batch_size, -1, -1)
1139
+ hidden_states = torch.cat((cls_tokens, hidden_states), dim=1)
1140
+ seq_length += 1
1141
+
1142
+ if attention_mask is None:
1143
+ attention_mask = torch.ones((batch_size, seq_length), device=hidden_states.device)
1144
+
1145
+ # Prepare head mask if needed
1146
+ # 1.0 in head_mask indicates we keep the head
1147
+ # attention_probs has shape bsz x n_heads x N x N
1148
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
1149
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
1150
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
1151
+ extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(
1152
+ attention_mask, (batch_size, seq_length), hidden_states.device
1153
+ )
1154
+
1155
+ encoder_outputs = self.encoder(
1156
+ hidden_states,
1157
+ attention_mask=extended_attention_mask,
1158
+ head_mask=head_mask,
1159
+ output_attentions=output_attentions,
1160
+ output_hidden_states=output_hidden_states,
1161
+ return_dict=return_dict,
1162
+ )
1163
+ sequence_output = encoder_outputs[0]
1164
+ sequence_output = self.layernorm(sequence_output)
1165
+ pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
1166
+
1167
+ if not return_dict:
1168
+ return (sequence_output, pooled_output) + encoder_outputs[1:]
1169
+
1170
+ return BaseModelOutputWithPooling(
1171
+ last_hidden_state=sequence_output,
1172
+ pooler_output=pooled_output,
1173
+ hidden_states=encoder_outputs.hidden_states,
1174
+ attentions=encoder_outputs.attentions,
1175
+ )
1176
+
1177
+
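The multimodal tower consumes unimodal hidden states that have already been projected to its own width, so it can be exercised directly. A hedged, shape-only sketch with random inputs; the 197/7/768 sizes are assumed from the `facebook/flava-full` defaults and match the `FlavaModel` example further down.

```python
# Hedged sketch: feed FlavaMultimodalModel with dummy pre-projected unimodal states.
import torch
from transformers import FlavaMultimodalModel

model = FlavaMultimodalModel.from_pretrained("facebook/flava-full")

# 197 projected image positions ([CLS] + 196 patches) + 7 projected text tokens (assumed sizes)
hidden_states = torch.randn(1, 197 + 7, 768)
outputs = model(hidden_states=hidden_states)
# With use_cls_token=True an extra slot is prepended: (1, 1 + 197 + 7, 768) == (1, 205, 768)
print(outputs.last_hidden_state.shape)
```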
1178
+ @add_start_docstrings(
1179
+ "The bare FLAVA Model transformer outputting raw hidden-states without any specific head on top.",
1180
+ FLAVA_START_DOCSTRING.format(config="FlavaConfig"),
1181
+ )
1182
+ class FlavaModel(FlavaPreTrainedModel):
1183
+ config_class = FlavaConfig
1184
+
1185
+ def __init__(self, config: FlavaConfig):
1186
+ super().__init__(config)
1187
+
1188
+ if not isinstance(config.text_config, FlavaTextConfig):
1189
+ raise ValueError(
1190
+ "config.text_config is expected to be of type FlavaTextConfig but is of type"
1191
+ f" {type(config.text_config)}."
1192
+ )
1193
+
1194
+ if not isinstance(config.image_config, FlavaImageConfig):
1195
+ raise ValueError(
1196
+ "config.image_config is expected to be of type FlavaImageConfig but is of type"
1197
+ f" {type(config.image_config)}."
1198
+ )
1199
+
1200
+ if not isinstance(config.multimodal_config, FlavaMultimodalConfig):
1201
+ raise ValueError(
1202
+ "config.multimodal_config is expected to be of type FlavaMultimodalConfig but "
1203
+ + f"is of type {type(config.multimodal_config)}."
1204
+ )
1205
+
1206
+ text_config = config.text_config
1207
+ image_config = config.image_config
1208
+ multimodal_config = config.multimodal_config
1209
+
1210
+ self.projection_dim = config.projection_dim
1211
+ self.text_hidden_size = text_config.hidden_size
1212
+ self.image_hidden_size = image_config.hidden_size
1213
+ self.mm_hidden_size = multimodal_config.hidden_size
1214
+
1215
+ self.text_model = FlavaTextModel(text_config)
1216
+ self.image_model = FlavaImageModel(image_config)
1217
+ self.multimodal_model = FlavaMultimodalModel(multimodal_config)
1218
+
1219
+ self.image_projection = nn.Linear(self.image_hidden_size, self.projection_dim)
1220
+ self.text_projection = nn.Linear(self.text_hidden_size, self.projection_dim)
1221
+ self.logit_scale = nn.Parameter(torch.tensor(self.config.logit_scale_init_value))
1222
+
1223
+ self.image_to_mm_projection = nn.Linear(self.image_hidden_size, self.mm_hidden_size)
1224
+ self.text_to_mm_projection = nn.Linear(self.text_hidden_size, self.mm_hidden_size)
1225
+ # Initialize weights and apply final processing
1226
+ self.post_init()
1227
+
1228
+ @add_start_docstrings_to_model_forward(FLAVA_TEXT_INPUTS_DOCSTRING.format("batch_size, text_seq_length"))
1229
+ def get_text_features(
1230
+ self,
1231
+ input_ids: Optional[torch.Tensor] = None,
1232
+ attention_mask: Optional[torch.Tensor] = None,
1233
+ token_type_ids: Optional[torch.Tensor] = None,
1234
+ position_ids: Optional[torch.Tensor] = None,
1235
+ output_attentions: Optional[bool] = None,
1236
+ output_hidden_states: Optional[bool] = None,
1237
+ return_dict: Optional[bool] = None,
1238
+ ) -> torch.FloatTensor:
1239
+ r"""
1240
+ Returns:
1241
+ text_features (`torch.FloatTensor` of shape `(batch_size, output_dim)`): The text embeddings obtained by
1242
+ applying the projection layer to the pooled output of [`FlavaTextModel`].
1243
+
1244
+ Examples:
1245
+
1246
+ ```python
1247
+ >>> from transformers import AutoProcessor, FlavaModel
1248
+
1249
+ >>> model = FlavaModel.from_pretrained("{0}")
1250
+ >>> processor = AutoProcessor.from_pretrained("{0}")
1251
+
1252
+ >>> inputs = processor(
1253
+ ... text=["a photo of a cat", "a photo of a dog"], max_length=77, padding="max_length", return_tensors="pt"
1254
+ ... )
1255
+ >>> text_features = model.get_text_features(**inputs)
1256
+ ```""".format(_CHECKPOINT_FOR_DOC)
1257
+ text_outputs = self.text_model(
1258
+ input_ids=input_ids,
1259
+ attention_mask=attention_mask,
1260
+ token_type_ids=token_type_ids,
1261
+ position_ids=position_ids,
1262
+ output_attentions=output_attentions,
1263
+ output_hidden_states=output_hidden_states,
1264
+ return_dict=return_dict,
1265
+ )
1266
+
1267
+ pooled_output = text_outputs[0] # last_hidden_state
1268
+ text_features = self.text_projection(pooled_output)
1269
+
1270
+ return text_features
1271
+
1272
+ @add_start_docstrings_to_model_forward(FLAVA_IMAGE_INPUTS_DOCSTRING.format("batch_size, image_num_patches"))
1273
+ def get_image_features(
1274
+ self,
1275
+ pixel_values: Optional[torch.Tensor] = None,
1276
+ bool_masked_pos: Optional[torch.BoolTensor] = None,
1277
+ interpolate_pos_encoding: Optional[bool] = None,
1278
+ attention_mask: Optional[torch.Tensor] = None,
1279
+ head_mask: Optional[torch.Tensor] = None,
1280
+ output_attentions: Optional[bool] = None,
1281
+ output_hidden_states: Optional[bool] = None,
1282
+ return_dict: Optional[bool] = None,
1283
+ ) -> torch.FloatTensor:
1284
+ r"""
1285
+ Returns:
1286
+ image_features (`torch.FloatTensor` of shape `(batch_size, output_dim)`): The image embeddings obtained by
1287
+ applying the projection layer to the pooled output of [`FlavaImageModel`].
1288
+
1289
+ Examples:
1290
+
1291
+ ```python
1292
+ >>> from PIL import Image
1293
+ >>> import requests
1294
+ >>> from transformers import AutoProcessor, FlavaModel
1295
+
1296
+ >>> model = FlavaModel.from_pretrained("{0}")
1297
+ >>> processor = AutoProcessor.from_pretrained("{0}")
1298
+
1299
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
1300
+ >>> image = Image.open(requests.get(url, stream=True).raw)
1301
+
1302
+ >>> inputs = processor(images=image, return_tensors="pt")
1303
+
1304
+ >>> image_features = model.get_image_features(**inputs)
1305
+ ```""".format(_CHECKPOINT_FOR_DOC)
1306
+ image_outputs = self.image_model(
1307
+ pixel_values=pixel_values,
1308
+ bool_masked_pos=bool_masked_pos,
1309
+ attention_mask=attention_mask,
1310
+ head_mask=head_mask,
1311
+ output_attentions=output_attentions,
1312
+ output_hidden_states=output_hidden_states,
1313
+ interpolate_pos_encoding=interpolate_pos_encoding,
1314
+ return_dict=return_dict,
1315
+ )
1316
+
1317
+ pooled_output = image_outputs[0] # last_hidden_state
1318
+ image_features = self.image_projection(pooled_output)
1319
+
1320
+ return image_features
1321
+
1322
+ @add_start_docstrings_to_model_forward(
1323
+ FLAVA_MODEL_INPUTS_DOCSTRING.format("batch_size, image_num_patches + text_seq_len")
1324
+ )
1325
+ @replace_return_docstrings(output_type=FlavaModelOutput, config_class=FlavaConfig)
1326
+ def forward(
1327
+ self,
1328
+ input_ids: Optional[torch.LongTensor] = None,
1329
+ pixel_values: Optional[torch.FloatTensor] = None,
1330
+ attention_mask: Optional[torch.Tensor] = None,
1331
+ token_type_ids: Optional[torch.Tensor] = None,
1332
+ bool_masked_pos: Optional[torch.Tensor] = None,
1333
+ position_ids: Optional[torch.LongTensor] = None,
1334
+ image_attention_mask: Optional[torch.Tensor] = None,
1335
+ skip_multimodal_encoder: Optional[bool] = None,
1336
+ output_attentions: Optional[bool] = None,
1337
+ output_hidden_states: bool = True,
1338
+ return_dict: Optional[bool] = None,
1339
+ ) -> Union[Tuple, FlavaOutput]:
1340
+ r"""
1341
+ Returns:
1342
+
1343
+ Examples:
1344
+
1345
+ ```python
1346
+ >>> from PIL import Image
1347
+ >>> import requests
1348
+ >>> from transformers import AutoProcessor, FlavaModel
1349
+
1350
+ >>> model = FlavaModel.from_pretrained("facebook/flava-full")
1351
+ >>> processor = AutoProcessor.from_pretrained("facebook/flava-full")
1352
+
1353
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
1354
+ >>> image = Image.open(requests.get(url, stream=True).raw)
1355
+
1356
+ >>> inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt", padding=True)
1357
+
1358
+ >>> outputs = model(**inputs)
1359
+
1360
+ >>> image_embeddings = outputs.image_embeddings
1361
+ >>> text_embeddings = outputs.text_embeddings
1362
+ >>> multimodal_embeddings = outputs.multimodal_embeddings
1363
+
1364
+ >>> outputs.image_embeddings.shape
1365
+ torch.Size([1, 197, 768])
1366
+
1367
+ >>> text_embeddings.shape
1368
+ torch.Size([1, 7, 768])
1369
+
1370
+ >>> multimodal_embeddings.shape
1371
+ torch.Size([1, 205, 768])
1372
+ ```
1373
+ """
1374
+
1375
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
1376
+ if not output_hidden_states:
1377
+ raise ValueError("FLAVA model requires hidden states to work. Please set `output_hidden_states=True`")
1378
+ image_embeddings = None
1379
+ image_states = None
1380
+ image_mm_projection = None
1381
+ image_output = None
1382
+ if pixel_values is not None:
1383
+ image_output = self.image_model(
1384
+ pixel_values=pixel_values,
1385
+ bool_masked_pos=bool_masked_pos,
1386
+ attention_mask=image_attention_mask,
1387
+ output_attentions=output_attentions,
1388
+ output_hidden_states=output_hidden_states,
1389
+ return_dict=return_dict,
1390
+ )
1391
+ image_embeddings, image_states = image_output[0], image_output[2]
1392
+ # Note that these states don't use final layernorm in the transformer model
1393
+ image_mm_projection = self.image_to_mm_projection(image_states[-1])
1394
+
1395
+ text_embeddings = None
1396
+ text_states = None
1397
+ text_mm_projection = None
1398
+ text_output = None
1399
+ if input_ids is not None:
1400
+ text_output = self.text_model(
1401
+ input_ids=input_ids,
1402
+ attention_mask=attention_mask,
1403
+ position_ids=position_ids,
1404
+ token_type_ids=token_type_ids,
1405
+ output_attentions=output_attentions,
1406
+ output_hidden_states=output_hidden_states,
1407
+ return_dict=return_dict,
1408
+ )
1409
+
1410
+ text_embeddings, text_states = text_output[0], text_output[2]
1411
+ # Note that these states don't use final layernorm in the transformer model
1412
+ text_mm_projection = self.text_to_mm_projection(text_states[-1])
1413
+
1414
+ multimodal_embeddings = None
1415
+ multimodal_output = None
1416
+ if image_mm_projection is not None and text_mm_projection is not None and not skip_multimodal_encoder:
1417
+ if attention_mask is not None:
1418
+ batch_size, seq_len, _ = image_mm_projection.shape
1419
+ if self.multimodal_model.use_cls_token:
1420
+ seq_len += 1
1421
+ attention_mask_image = torch.ones(batch_size, seq_len, device=image_mm_projection.device)
1422
+ attention_multimodal = torch.cat([attention_mask_image, attention_mask], dim=1)
1423
+ else:
1424
+ attention_multimodal = None
1425
+ multimodal_input = torch.cat([image_mm_projection, text_mm_projection], dim=1)
1426
+ multimodal_output = self.multimodal_model(
1427
+ multimodal_input, attention_mask=attention_multimodal, return_dict=return_dict
1428
+ )
1429
+ multimodal_embeddings = multimodal_output[0]
1430
+
1431
+ if not return_dict:
1432
+ return (
1433
+ image_embeddings,
1434
+ image_output,
1435
+ text_embeddings,
1436
+ text_output,
1437
+ multimodal_embeddings,
1438
+ multimodal_output,
1439
+ )
1440
+
1441
+ return FlavaModelOutput(
1442
+ image_embeddings=image_embeddings,
1443
+ image_output=image_output,
1444
+ text_embeddings=text_embeddings,
1445
+ text_output=text_output,
1446
+ multimodal_embeddings=multimodal_embeddings,
1447
+ multimodal_output=multimodal_output,
1448
+ )
1449
+
1450
+
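`get_text_features` and `get_image_features` above project every sequence position, and `FlavaForPreTraining` below uses the position-0 ([CLS]) projections for its contrastive objective. A hedged sketch of CLIP-style image-text scoring assembled from those pieces; the slicing/normalization recipe mirrors the pretraining code and is not an official API.

```python
# Hedged sketch: zero-shot-style similarity from the [CLS] projections.
import requests
import torch
from PIL import Image
from transformers import AutoProcessor, FlavaModel

model = FlavaModel.from_pretrained("facebook/flava-full")
processor = AutoProcessor.from_pretrained("facebook/flava-full")

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
texts = ["a photo of a cat", "a photo of a dog"]
inputs = processor(text=texts, images=[image] * len(texts), return_tensors="pt", padding=True)

with torch.no_grad():
    image_feats = model.get_image_features(pixel_values=inputs.pixel_values)[:, 0, :]
    text_feats = model.get_text_features(
        input_ids=inputs.input_ids, attention_mask=inputs.attention_mask
    )[:, 0, :]
    image_feats = torch.nn.functional.normalize(image_feats, dim=-1)
    text_feats = torch.nn.functional.normalize(text_feats, dim=-1)
    logits = image_feats @ text_feats.t() * model.logit_scale.exp()

print(logits.softmax(dim=-1))  # each image's similarity distribution over the two texts
```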
1451
+ class FlavaImageCodebookResPath(nn.Module):
1452
+ def __init__(self, in_size: int, out_size: int, **kwargs):
1453
+ super().__init__()
1454
+ hid_size = out_size // 4
1455
+
1456
+ path = OrderedDict()
1457
+ path["relu_1"] = nn.ReLU()
1458
+ path["conv_1"] = nn.Conv2d(in_size, hid_size, kernel_size=3, padding=1)
1459
+ path["relu_2"] = nn.ReLU()
1460
+ path["conv_2"] = nn.Conv2d(hid_size, hid_size, kernel_size=3, padding=1)
1461
+ path["relu_3"] = nn.ReLU()
1462
+ path["conv_3"] = nn.Conv2d(hid_size, hid_size, kernel_size=3, padding=1)
1463
+ path["relu_4"] = nn.ReLU()
1464
+ path["conv_4"] = nn.Conv2d(hid_size, out_size, kernel_size=1, padding=0)
1465
+
1466
+ self.path = nn.Sequential(path)
1467
+
1468
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
1469
+ return self.path(x)
1470
+
1471
+
1472
+ class FlavaImageCodebookBlock(nn.Module):
1473
+ def __init__(self, in_size: int, out_size: int, num_layers: int, **kwargs):
1474
+ super().__init__()
1475
+
1476
+ self.post_gain = 1 / (num_layers**2)
1477
+
1478
+ if in_size != out_size:
1479
+ self.id_path = nn.Conv2d(in_size, out_size, kernel_size=1, padding=0)
1480
+ else:
1481
+ self.id_path = nn.Identity()
1482
+
1483
+ self.res_path = FlavaImageCodebookResPath(in_size, out_size)
1484
+
1485
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
1486
+ return self.id_path(x) + self.post_gain * self.res_path(x)
1487
+
1488
+
1489
+ class FlavaImageCodebookLayerGroup(nn.Module):
1490
+ def __init__(self, num_blocks: int, num_layers: int, in_size: int, out_size: int, use_pool: bool = True):
1491
+ super().__init__()
1492
+ blocks = OrderedDict()
1493
+ for i in range(num_blocks):
1494
+ if i == 0:
1495
+ blocks[f"block_{i+1}"] = FlavaImageCodebookBlock(in_size, out_size, num_layers)
1496
+ else:
1497
+ blocks[f"block_{i+1}"] = FlavaImageCodebookBlock(out_size, out_size, num_layers)
1498
+
1499
+ if use_pool:
1500
+ blocks["pool"] = nn.MaxPool2d(kernel_size=2)
1501
+
1502
+ self.group = nn.Sequential(blocks)
1503
+
1504
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
1505
+ return self.group(x)
1506
+
1507
+
1508
+ # Inspired by DALLE Encoder in https://github.com/openai/DALL-E/blob/5be4b236bc3ade6943662354117a0e83752cc322/dall_e/encoder.py#L42
1509
+ @add_start_docstrings(
1510
+ """
1511
+ The FLAVA image codebook model, inspired by DALL-E's original encoder. Outputs raw hidden states and can be used
1512
+ to generate image tokens for an image based on DALL-E's vocab. Used to generate labels for MIM. Use
1513
+ `get_codebook_indices` to get image tokens for an image.
1514
+ """,
1515
+ FLAVA_START_DOCSTRING.format(config="FlavaImageCodebookConfig"),
1516
+ )
1517
+ class FlavaImageCodebook(FlavaPreTrainedModel):
1518
+ base_model_prefix = ""
1519
+ config_class = FlavaImageCodebookConfig
1520
+ main_input_name = "pixel_values"
1521
+ supports_gradient_checkpointing = False
1522
+
1523
+ def __init__(
1524
+ self,
1525
+ config: FlavaImageCodebookConfig,
1526
+ **kwargs: Any,
1527
+ ):
1528
+ super().__init__(config)
1529
+
1530
+ self.config = config
1531
+ self.num_groups = config.num_groups
1532
+ self.input_channels = config.input_channels
1533
+ self.num_blocks_per_group = config.num_blocks_per_group
1534
+ self.hidden_size = config.hidden_size
1535
+ self.vocab_size = config.vocab_size
1536
+
1537
+ num_layers = self.num_groups * self.num_blocks_per_group
1538
+
1539
+ output_blocks = OrderedDict()
1540
+ output_blocks["relu"] = nn.ReLU()
1541
+ output_blocks["conv"] = nn.Conv2d(8 * self.hidden_size, self.vocab_size, kernel_size=1, padding=0)
1542
+
1543
+ blocks = OrderedDict()
1544
+ blocks["input"] = nn.Conv2d(self.input_channels, 1 * self.hidden_size, kernel_size=7, padding=3)
1545
+ blocks["group_1"] = FlavaImageCodebookLayerGroup(
1546
+ self.num_blocks_per_group, num_layers, 1 * self.hidden_size, 1 * self.hidden_size
1547
+ )
1548
+ blocks["group_2"] = FlavaImageCodebookLayerGroup(
1549
+ self.num_blocks_per_group, num_layers, 1 * self.hidden_size, 2 * self.hidden_size
1550
+ )
1551
+ blocks["group_3"] = FlavaImageCodebookLayerGroup(
1552
+ self.num_blocks_per_group, num_layers, 2 * self.hidden_size, 4 * self.hidden_size
1553
+ )
1554
+ blocks["group_4"] = FlavaImageCodebookLayerGroup(
1555
+ self.num_blocks_per_group, num_layers, 4 * self.hidden_size, 8 * self.hidden_size, use_pool=False
1556
+ )
1557
+ blocks["output"] = nn.Sequential(output_blocks)
1558
+
1559
+ self.blocks = nn.Sequential(blocks)
1560
+
1561
+ self.post_init()
1562
+
1563
+ if self.config.freeze:
1564
+ for param in self.parameters():
1565
+ param.requires_grad = False
1566
+
1567
+ def get_codebook_indices(self, pixel_values: torch.Tensor) -> torch.Tensor:
1568
+ """
1569
+ Args:
1570
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
1571
+ Pixel values. Codebook pixel values can be obtained using [`AutoImageProcessor`] by passing
1572
+ `return_codebook_pixels=True`. See [`FlavaImageProcessor.__call__`] for details.
1573
+
1574
+ Examples:
1575
+ ```python
1576
+ >>> from PIL import Image
1577
+ >>> import requests
1578
+ >>> from transformers import AutoImageProcessor, FlavaImageCodebook
1579
+
1580
+ >>> model = FlavaImageCodebook.from_pretrained("{0}")
1581
+ >>> image_processor = AutoImageProcessor.from_pretrained("{0}")
1582
+
1583
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
1584
+ >>> image = Image.open(requests.get(url, stream=True).raw)
1585
+
1586
+ >>> inputs = image_processor([image], return_codebook_pixels=True, return_tensors="pt")
1587
+ >>> inputs = dict(pixel_values=inputs.codebook_pixel_values)
1588
+
1589
+ >>> outputs = model.get_codebook_indices(**inputs)
1590
+ ```
1591
+ """.format(_CHECKPOINT_FOR_CODEBOOK_DOC)
1592
+ z_logits = self.blocks(pixel_values)
1593
+ return torch.argmax(z_logits, axis=1)
1594
+
1595
+ def get_codebook_probs(self, pixel_values: torch.Tensor) -> torch.Tensor:
1596
+ z_logits = self.blocks(pixel_values)
1597
+ return nn.Softmax(dim=1)(z_logits)
1598
+
1599
+ def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor:
1600
+ """
1601
+ Args:
1602
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
1603
+ Pixel values. Codebook pixel values can be obtained using [`AutoImageProcessor`] by passing
1604
+ `return_codebook_pixels=True`. See [`FlavaImageProcessor.__call__`] for details.
1605
+
1606
+ Examples:
1607
+
1608
+ ```python
1609
+ >>> from PIL import Image
1610
+ >>> import requests
1611
+ >>> from transformers import AutoImageProcessor, FlavaImageCodebook
1612
+
1613
+ >>> model = FlavaImageCodebook.from_pretrained("{0}")
1614
+ >>> image_processor = AutoImageProcessor.from_pretrained("{0}")
1615
+
1616
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
1617
+ >>> image = Image.open(requests.get(url, stream=True).raw)
1618
+
1619
+ >>> inputs = image_processor([image], return_codebook_pixels=True, return_tensors="pt")
1620
+ >>> inputs = dict(pixel_values=inputs.codebook_pixel_values)
1621
+
1622
+ >>> outputs = model(**inputs)
1623
+ >>> print(outputs.shape)
1624
+ torch.Size([1, 8192, 14, 14])
1625
+ ```
1626
+ """.format(_CHECKPOINT_FOR_CODEBOOK_DOC)
1627
+ if len(pixel_values.shape) != 4:
1628
+ raise ValueError(f"input shape {pixel_values.shape} is not 4d")
1629
+ if pixel_values.shape[1] != self.input_channels:
1630
+ raise ValueError(f"input has {pixel_values.shape[1]} channels but model built for {self.input_channels}")
1631
+ return self.blocks(pixel_values)
1632
+
1633
+
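A hedged sketch tying the codebook to the `mim_labels` described in the pretraining docstring above. The `facebook/flava-image-codebook` checkpoint name is an assumption standing in for the `_CHECKPOINT_FOR_CODEBOOK_DOC` placeholder used in the docstrings.

```python
# Hedged sketch: derive MIM labels for FlavaForPreTraining from a raw image.
from PIL import Image
import requests
from transformers import AutoImageProcessor, FlavaImageCodebook

codebook = FlavaImageCodebook.from_pretrained("facebook/flava-image-codebook")  # assumed checkpoint
image_processor = AutoImageProcessor.from_pretrained("facebook/flava-full")

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

inputs = image_processor([image], return_codebook_pixels=True, return_tensors="pt")
mim_labels = codebook.get_codebook_indices(inputs.codebook_pixel_values)
# One discrete token id per codebook cell, e.g. (1, 14, 14) for 112x112 codebook pixels;
# FlavaForPreTraining._resize_to_2d flattens this to (1, 196) before applying the mask.
print(mim_labels.shape)
```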
1634
+ class FlavaPredictionHeadTransform(nn.Module):
1635
+ def __init__(self, config):
1636
+ super().__init__()
1637
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
1638
+ if isinstance(config.hidden_act, str):
1639
+ self.transform_act_fn = ACT2FN[config.hidden_act]
1640
+ else:
1641
+ self.transform_act_fn = config.hidden_act
1642
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
1643
+
1644
+ def forward(self, hidden_states):
1645
+ hidden_states = self.dense(hidden_states)
1646
+ hidden_states = self.transform_act_fn(hidden_states)
1647
+ hidden_states = self.LayerNorm(hidden_states)
1648
+ return hidden_states
1649
+
1650
+
1651
+ class FlavaMaskedPredictionHead(nn.Module):
1652
+ def __init__(self, config, weight=None):
1653
+ super().__init__()
1654
+ self.config = config
1655
+ self.transform = FlavaPredictionHeadTransform(config)
1656
+ self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
1657
+ self.bias = nn.Parameter(torch.zeros(config.vocab_size))
1658
+ if weight is not None:
1659
+ self.decoder.weight = weight
1660
+
1661
+ # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
1662
+ self.decoder.bias = self.bias
1663
+
1664
+ def forward(self, x):
1665
+ x = self.transform(x)
1666
+ x = self.decoder(x)
1667
+ return x
1668
+
1669
+
1670
+ class FlavaITMHead(nn.Module):
1671
+ def __init__(self, config):
1672
+ super().__init__()
1673
+ self.config = config
1674
+ self.pooler = FlavaPooler(config)
1675
+ self.seq_relationship = nn.Linear(config.hidden_size, 2)
1676
+
1677
+ def forward(self, x):
1678
+ x = self.pooler(x)
1679
+ x = self.seq_relationship(x)
1680
+ return x
1681
+
1682
+
1683
+ class FlavaGlobalContrastiveHead(nn.Module):
1684
+ def __init__(self, config):
1685
+ super().__init__()
1686
+ self.config = config
1687
+ self.global_backprop_contrastive = config.global_backprop_contrastive
1688
+
1689
+ def forward(self, image_embeddings, text_embeddings, logit_scale):
1690
+ temperature = torch.exp(logit_scale)
1691
+ if not torch.distributed.is_available() or not torch.distributed.is_initialized():
1692
+ labels = torch.arange(image_embeddings.size(0), device=image_embeddings.device)
1693
+ image_embeddings_all = [image_embeddings]
1694
+ text_embeddings_all = [text_embeddings]
1695
+ else:
1696
+ local_batch_size = image_embeddings.size(0)
1697
+ world_size = torch.distributed.get_world_size()
1698
+
1699
+ if self.global_backprop_contrastive:
1700
+ # `torch.distributed.nn.functional.all_gather` does backprop on all active workers
1701
+ # whereas `torch.distributed.all_gather` only backpropagates on the current worker.
1702
+ image_embeddings_all = torch.distributed.nn.functional.all_gather(image_embeddings)
1703
+ text_embeddings_all = torch.distributed.nn.functional.all_gather(text_embeddings)
1704
+ else:
1705
+ image_embeddings_all = [torch.zeros_like(text_embeddings) for _ in range(world_size)]
1706
+ text_embeddings_all = [torch.zeros_like(image_embeddings) for _ in range(world_size)]
1707
+ torch.distributed.all_gather(image_embeddings_all, image_embeddings)
1708
+ torch.distributed.all_gather(text_embeddings_all, text_embeddings)
1709
+
1710
+ labels = local_batch_size * torch.distributed.get_rank() + torch.arange(
1711
+ local_batch_size, device=image_embeddings.device
1712
+ )
1713
+
1714
+ image_embeddings_all = torch.cat(image_embeddings_all)
1715
+ text_embeddings_all = torch.cat(text_embeddings_all)
1716
+
1717
+ logits_per_image = torch.matmul(image_embeddings, text_embeddings_all.transpose(0, 1)) * temperature
1718
+ logits_per_text = torch.matmul(text_embeddings, image_embeddings_all.transpose(0, 1)) * temperature
1719
+
1720
+ return logits_per_image, logits_per_text, labels
1721
+
1722
+
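For reference, the logits returned above feed the symmetric cross-entropy computed in `FlavaForPreTraining` below. A hedged summary of that global contrastive objective in standard notation, where $\hat{I}$ and $\hat{T}$ are the L2-normalized [CLS] projections, the "all" subscript denotes the (optionally all-gathered) batch, $y$ the in-batch match indices, and $w_{\mathrm{gc}}$ the configured `global_contrastive_weight`:

```latex
\tau = \exp(\mathrm{logit\_scale}), \qquad
\mathrm{logits}_{I \to T} = \tau \, \hat{I} \, \hat{T}_{\mathrm{all}}^{\top}, \qquad
\mathrm{logits}_{T \to I} = \tau \, \hat{T} \, \hat{I}_{\mathrm{all}}^{\top}

\mathcal{L}_{\mathrm{GC}} = \frac{w_{\mathrm{gc}}}{2}
\Big( \mathrm{CE}\big(\mathrm{logits}_{I \to T},\, y\big) + \mathrm{CE}\big(\mathrm{logits}_{T \to I},\, y\big) \Big)
```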
1723
+ @add_start_docstrings(
1724
+ """
1725
+ The FLAVA model for pretraining which outputs losses, embeddings, logits and transformer outputs.
1726
+ """,
1727
+ FLAVA_START_DOCSTRING.format(config="FlavaConfig") + FLAVA_PRETRAINING_START_DOCSTRING_EXTRA,
1728
+ )
1729
+ class FlavaForPreTraining(FlavaPreTrainedModel):
1730
+ # Those are linked to xxx.bias
1731
+ _tied_weights_keys = [
1732
+ "mmm_text_head.decoder.bias",
1733
+ "mmm_image_head.decoder.bias",
1734
+ "mlm_head.decoder.bias",
1735
+ "mim_head.decoder.bias",
1736
+ ]
1737
+
1738
+ def __init__(self, config: FlavaConfig, image_codebook: Optional[nn.Module] = None):
1739
+ super().__init__(config)
1740
+ self.flava = FlavaModel(config)
1741
+
1742
+ self.image_codebook = image_codebook
1743
+ if self.image_codebook is None and config.init_codebook:
1744
+ self.image_codebook = FlavaImageCodebook(config.image_codebook_config)
1745
+
1746
+ # Leverage text and image encoder configs to create the masked
1747
+ # head since it has the right vocab
1748
+ self.mim_head = FlavaMaskedPredictionHead(config.image_config)
1749
+ self.mlm_head = FlavaMaskedPredictionHead(config.text_config)
1750
+ self.itm_head = FlavaITMHead(config)
1751
+ self.mmm_image_head = FlavaMaskedPredictionHead(config.image_config)
1752
+ self.mmm_text_head = FlavaMaskedPredictionHead(config.text_config)
1753
+ self.global_contrastive_head = FlavaGlobalContrastiveHead(config)
1754
+
1755
+ self.image_vocab_size = config.image_config.vocab_size
1756
+ self.text_vocab_size = config.text_config.vocab_size
1757
+ self.mlm_weight = config.mlm_weight
1758
+ self.mim_weight = config.mim_weight
1759
+ self.global_contrastive_weight = config.global_contrastive_weight
1760
+ self.ce_ignore_index = config.ce_ignore_index
1761
+ self.itm_weight = config.itm_weight
1762
+ self.mmm_image_weight = config.mmm_image_weight
1763
+ self.mmm_text_weight = config.mmm_text_weight
1764
+ self.skip_unmasked_multimodal_encoder = config.skip_unmasked_multimodal_encoder
1765
+
1766
+ self.post_init()
1767
+
1768
+ def _resize_to_2d(self, x: torch.Tensor):
1769
+ if x.dim() > 2:
1770
+ x = x.view(x.size(0), -1)
1771
+ return x
1772
+
1773
+ @add_start_docstrings_to_model_forward(
1774
+ FLAVA_PRETRAINING_INPUTS_DOCSTRING.format("batch_size, text_seq_len", "batch_size, image_num_patches")
1775
+ )
1776
+ @replace_return_docstrings(output_type=FlavaForPreTrainingOutput, config_class=FlavaConfig)
1777
+ def forward(
1778
+ self,
1779
+ input_ids: Optional[torch.LongTensor] = None,
1780
+ input_ids_masked: Optional[torch.LongTensor] = None,
1781
+ pixel_values: Optional[torch.FloatTensor] = None,
1782
+ codebook_pixel_values: Optional[torch.FloatTensor] = None,
1783
+ attention_mask: Optional[torch.Tensor] = None,
1784
+ token_type_ids: Optional[torch.Tensor] = None,
1785
+ bool_masked_pos: Optional[torch.Tensor] = None,
1786
+ position_ids: Optional[torch.LongTensor] = None,
1787
+ image_attention_mask: Optional[torch.Tensor] = None,
1788
+ skip_unmasked_multimodal_encoder: bool = None,
1789
+ mlm_labels: Optional[torch.Tensor] = None,
1790
+ mim_labels: Optional[torch.Tensor] = None,
1791
+ itm_labels: Optional[torch.Tensor] = None,
1792
+ output_attentions: Optional[bool] = None,
1793
+ output_hidden_states: bool = True,
1794
+ return_dict: Optional[bool] = None,
1795
+ return_loss: Optional[bool] = None,
1796
+ ) -> Union[Tuple[torch.Tensor], FlavaForPreTrainingOutput]:
1797
+ """
1798
+ Examples:
1799
+ ```python
1800
+ >>> from PIL import Image
1801
+ >>> import requests
1802
+ >>> from transformers import FlavaForPreTraining, AutoProcessor
1803
+
1804
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
1805
+ >>> image = Image.open(requests.get(url, stream=True).raw)
1806
+
1807
+ >>> model = FlavaForPreTraining.from_pretrained("facebook/flava-full")
1808
+ >>> processor = AutoProcessor.from_pretrained("facebook/flava-full")
1809
+
1810
+ >>> text = ["a photo of a cat"]
1811
+
1812
+ >>> inputs = processor(
1813
+ ... images=[image],
1814
+ ... text=text,
1815
+ ... return_masks=True,
1816
+ ... return_codebook_pixels=True,
1817
+ ... padding=True,
1818
+ ... max_length=77,
1819
+ ... return_tensors="pt",
1820
+ ... )
1821
+
1822
+
1823
+ >>> output = model(**inputs)
1824
+ ```
1825
+
1826
+ Return:
1827
+
1828
+ """
1829
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1830
+ return_loss = return_loss if return_loss is not None else self.config.return_loss
1831
+
1832
+ skip_unmasked_multimodal_encoder = (
1833
+ skip_unmasked_multimodal_encoder
1834
+ if skip_unmasked_multimodal_encoder is not None
1835
+ else self.skip_unmasked_multimodal_encoder
1836
+ )
1837
+
1838
+ if input_ids_masked is None and input_ids is not None:
1839
+ logger.warning(
1840
+ "`input_ids_masked` isn't passed which means MLM loss won't be calculated correctlySetting it to"
1841
+ " `input_ids` so that model can work. Please pass it if this is unintentional. This is usually OKAY if"
1842
+ " you are doing inference on unmasked text..."
1843
+ )
1844
+ input_ids_masked = input_ids
1845
+
1846
+ flava_output = self.flava(
1847
+ input_ids=input_ids,
1848
+ pixel_values=pixel_values,
1849
+ attention_mask=attention_mask,
1850
+ token_type_ids=token_type_ids,
1851
+ position_ids=position_ids,
1852
+ image_attention_mask=image_attention_mask,
1853
+ # Don't need unmasked multimodal embedding for anything so skip it
1854
+ # NOTE: ITM uses masked version
1855
+ skip_multimodal_encoder=skip_unmasked_multimodal_encoder,
1856
+ output_attentions=output_attentions,
1857
+ output_hidden_states=output_hidden_states,
1858
+ # Pass true to have deterministic outputs
1859
+ return_dict=True,
1860
+ )
1861
+
1862
+ flava_masked_output = self.flava(
1863
+ input_ids=input_ids_masked,
1864
+ pixel_values=pixel_values,
1865
+ attention_mask=attention_mask,
1866
+ token_type_ids=token_type_ids,
1867
+ image_attention_mask=image_attention_mask,
1868
+ bool_masked_pos=bool_masked_pos,
1869
+ output_attentions=output_attentions,
1870
+ output_hidden_states=output_hidden_states,
1871
+ return_dict=True,
1872
+ )
1873
+
1874
+ pos_mask = None
1875
+
1876
+ image_embeddings = flava_output.image_embeddings
1877
+ text_embeddings = flava_output.text_embeddings
1878
+ image_masked_embeddings = flava_masked_output.image_embeddings
1879
+ text_masked_embeddings = flava_masked_output.text_embeddings
1880
+ multimodal_masked_embeddings = flava_masked_output.multimodal_embeddings
1881
+
1882
+ total_loss = mim_loss = mlm_loss = mmm_text_loss = mmm_image_loss = gc_loss = itm_loss = None
1883
+ mim_logits = mlm_logits = mmm_text_logits = mmm_image_logits = None
1884
+ itm_logits = logits_per_image = logits_per_text = None
1885
+
1886
+ # Calculate mim_labels if necessary from the image_codebook
1887
+ if image_masked_embeddings is not None or multimodal_masked_embeddings is not None:
1888
+ if mim_labels is None and return_loss:
1889
+ if self.image_codebook is None:
1890
+ raise RuntimeError(
1891
+ "`return_loss` is set to True but the image codebook is not initialized and no `mim_labels` "
1892
+ " have been passed. Reinstantiate the model with `init_codebook` set to True or "
1893
+ "pass in your custom `mim_labels`"
1894
+ )
1895
+ if codebook_pixel_values is None:
1896
+ raise ValueError(
1897
+ "`codebook_pixel_value` are required to generate `mim_labels` if loss is expected. "
1898
+ "Call `AutoProcessor` with `return_codebook_pixels` set to True"
1899
+ )
1900
+ mim_labels = self.image_codebook.get_codebook_indices(codebook_pixel_values)
1901
+ # Unimodal MIM Loss
1902
+ # If multimodal embeddings are present, we will calculate MMM loss
1903
+ if self.mim_weight > 0 and image_masked_embeddings is not None and multimodal_masked_embeddings is None:
1904
+ sequence_for_image = image_masked_embeddings
1905
+
1906
+ if mim_labels is not None:
1907
+ mim_labels = self._resize_to_2d(mim_labels)
1908
+ bool_masked_pos = self._resize_to_2d(bool_masked_pos)
1909
+ mim_labels[bool_masked_pos.ne(True)] = self.ce_ignore_index
1910
+
1911
+ sequence_for_image = sequence_for_image[:, -mim_labels.size(1) :, :]
1912
+ masked_tokens = mim_labels.ne(self.ce_ignore_index)
1913
+ mim_labels_filtered = mim_labels[masked_tokens]
1914
+ sequence_for_image = sequence_for_image[masked_tokens, :]
1915
+ mim_logits = self.mim_head(sequence_for_image)
1916
+ if return_loss:
1917
+ mim_loss = nn.functional.cross_entropy(
1918
+ mim_logits.view(-1, self.image_vocab_size), mim_labels_filtered.view(-1)
1919
+ )
1920
+ mim_loss *= self.mim_weight
1921
+ else:
1922
+ mim_logits = self.mim_head(sequence_for_image)
1923
+
1924
+ # Unimodal MLM Loss
1925
+ if self.mlm_weight > 0 and text_masked_embeddings is not None and multimodal_masked_embeddings is None:
1926
+ sequence_for_text = text_masked_embeddings
1927
+ if mlm_labels is not None:
1928
+ mlm_labels = self._resize_to_2d(mlm_labels)
1929
+ sequence_for_text = sequence_for_text[:, -mlm_labels.size(1) :, :]
1930
+ masked_tokens = mlm_labels.ne(self.ce_ignore_index)
1931
+ mlm_labels_filtered = mlm_labels[masked_tokens]
1932
+ sequence_for_text = sequence_for_text[masked_tokens, :]
1933
+ mlm_logits = self.mlm_head(sequence_for_text)
1934
+ if return_loss:
1935
+ mlm_loss = nn.functional.cross_entropy(
1936
+ mlm_logits.view(-1, self.text_vocab_size), mlm_labels_filtered.view(-1)
1937
+ )
1938
+ mlm_loss *= self.mlm_weight
1939
+ else:
1940
+ mlm_logits = self.mlm_head(sequence_for_text)
1941
+
1942
+ # ITM Loss
1943
+ if self.itm_weight > 0 and multimodal_masked_embeddings is not None:
1944
+ itm_logits = self.itm_head(multimodal_masked_embeddings)
1945
+
1946
+ if itm_labels is not None:
1947
+ pos_pairs = itm_labels.ne(0)
1948
+ pos_mask = torch.where(pos_pairs.any(), pos_pairs, pos_pairs.new([True]))
1949
+ if return_loss:
1950
+ itm_loss = nn.functional.cross_entropy(itm_logits, itm_labels)
1951
+ itm_loss *= self.itm_weight
1952
+
1953
+ if multimodal_masked_embeddings is not None:
1954
+ multimodal_masked_embeddings = multimodal_masked_embeddings[pos_mask]
1955
+
1956
+ if mlm_labels is not None:
1957
+ mlm_labels = mlm_labels[pos_mask]
1958
+
1959
+ if mim_labels is not None:
1960
+ mim_labels = mim_labels[pos_mask]
1961
+ bool_masked_pos = bool_masked_pos[pos_mask]
1962
+
1963
+ # MMM Image Loss
1964
+ if multimodal_masked_embeddings is not None and self.mmm_image_weight > 0:
1965
+ sequence_for_image = multimodal_masked_embeddings
1966
+ end_index = image_masked_embeddings.size(1) - 1
1967
+ sequence_for_image = sequence_for_image[:, 2 : 2 + end_index, :]
1968
+
1969
+ if mim_labels is not None:
1970
+ mim_labels = self._resize_to_2d(mim_labels)
1971
+ bool_masked_pos = self._resize_to_2d(bool_masked_pos)
1972
+ mim_labels[bool_masked_pos.ne(True)] = self.ce_ignore_index
1973
+
1974
+ masked_tokens = mim_labels.ne(self.ce_ignore_index)
1975
+ mim_labels_filtered = mim_labels[masked_tokens]
1976
+ sequence_for_image = sequence_for_image[masked_tokens, :]
1977
+ mmm_image_logits = self.mmm_image_head(sequence_for_image)
1978
+ if return_loss:
1979
+ mmm_image_loss = nn.functional.cross_entropy(
1980
+ mmm_image_logits.view(-1, self.image_vocab_size), mim_labels_filtered.view(-1)
1981
+ )
1982
+ mmm_image_loss *= self.mmm_image_weight
1983
+ else:
1984
+ mmm_image_logits = self.mmm_image_head(sequence_for_image)
1985
+
1986
+ # MMM Text Loss
1987
+ if multimodal_masked_embeddings is not None and self.mmm_text_weight > 0:
1988
+ sequence_for_text = multimodal_masked_embeddings
1989
+ sequence_for_text = sequence_for_text[:, -text_masked_embeddings.size(1) :, :]
1990
+
1991
+ if mlm_labels is not None:
1992
+ mlm_labels = self._resize_to_2d(mlm_labels)
1993
+ masked_tokens = mlm_labels.ne(self.ce_ignore_index)
1994
+ mlm_labels_filtered = mlm_labels[masked_tokens]
1995
+ sequence_for_text = sequence_for_text[masked_tokens, :]
1996
+ mmm_text_logits = self.mmm_text_head(sequence_for_text)
1997
+ if return_loss:
1998
+ mmm_text_loss = nn.functional.cross_entropy(
1999
+ mmm_text_logits.view(-1, self.text_vocab_size), mlm_labels_filtered.view(-1)
2000
+ )
2001
+ mmm_text_loss *= self.mmm_text_weight
2002
+ else:
2003
+ mmm_text_logits = self.mmm_text_head(sequence_for_text)
2004
+
2005
+ # Global Contrastive Loss
2006
+ if image_embeddings is not None and text_embeddings is not None and self.global_contrastive_weight > 0:
2007
+ text_embedding = self.flava.text_projection(text_embeddings[:, 0, :])
2008
+ text_embedding = nn.functional.normalize(text_embedding, dim=-1)
2009
+
2010
+ image_embedding = self.flava.image_projection(image_embeddings[:, 0, :])
2011
+ image_embedding = nn.functional.normalize(image_embedding, dim=-1)
2012
+
2013
+ self.flava.logit_scale.data.clamp_(LOGIT_SCALE_CLAMP_MIN, LOGIT_SCALE_CLAMP_MAX)
2014
+
2015
+ logits_per_image, logits_per_text, gc_labels = self.global_contrastive_head(
2016
+ image_embedding, text_embedding, self.flava.logit_scale
2017
+ )
2018
+
2019
+ # Apply ITM negative mask if any
2020
+ if pos_mask is not None:
2021
+ logits_per_image = logits_per_image[pos_mask]
2022
+ logits_per_text = logits_per_text[pos_mask]
2023
+ gc_labels = gc_labels[pos_mask]
2024
+
2025
+ if return_loss:
2026
+ gc_loss_image = nn.functional.cross_entropy(logits_per_image, gc_labels)
2027
+ gc_loss_text = nn.functional.cross_entropy(logits_per_text, gc_labels)
2028
+ gc_loss = (gc_loss_image + gc_loss_text) / 2
2029
+ gc_loss *= self.global_contrastive_weight
2030
+
2031
+ flava_losses = FlavaLosses(
2032
+ mim=mim_loss,
2033
+ mlm=mlm_loss,
2034
+ itm=itm_loss,
2035
+ global_contrastive=gc_loss,
2036
+ mmm_image=mmm_image_loss,
2037
+ mmm_text=mmm_text_loss,
2038
+ )
2039
+
2040
+ if return_loss and not flava_losses.all_none():
2041
+ total_loss = sum(loss if loss is not None else 0 for loss in flava_losses.values())
2042
+
2043
+ if not return_dict:
2044
+ output = (
2045
+ image_embeddings,
2046
+ flava_output.image_output.to_tuple() if flava_output.image_output is not None else None,
2047
+ text_embeddings,
2048
+ flava_output.text_output.to_tuple() if flava_output.text_output is not None else None,
2049
+ flava_output.multimodal_embeddings,
2050
+ flava_output.multimodal_output.to_tuple() if flava_output.multimodal_output is not None else None,
2051
+ image_masked_embeddings,
2052
+ flava_masked_output.image_output.to_tuple() if flava_masked_output.image_output is not None else None,
2053
+ text_masked_embeddings,
2054
+ flava_masked_output.text_output.to_tuple() if flava_masked_output.text_output is not None else None,
2055
+ multimodal_masked_embeddings,
2056
+ flava_masked_output.multimodal_output.to_tuple()
2057
+ if flava_masked_output.multimodal_output is not None
2058
+ else None,
2059
+ mim_logits,
2060
+ mlm_logits,
2061
+ itm_logits,
2062
+ logits_per_image,
2063
+ logits_per_text,
2064
+ mmm_image_logits,
2065
+ mmm_text_logits,
2066
+ )
2067
+ if return_loss and not flava_losses.all_none():
2068
+ output = (
2069
+ total_loss,
2070
+ flava_losses,
2071
+ ) + output
2072
+
2073
+ # Filter None as transformer by default won't handle it
2074
+ return tuple(x for x in output if x is not None)
2075
+
2076
+ return FlavaForPreTrainingOutput(
2077
+ loss=total_loss,
2078
+ loss_info=flava_losses,
2079
+ image_embeddings=image_embeddings,
2080
+ image_output=flava_output.image_output,
2081
+ text_embeddings=text_embeddings,
2082
+ text_output=flava_output.text_output,
2083
+ multimodal_embeddings=flava_output.multimodal_embeddings,
2084
+ multimodal_output=flava_output.multimodal_output,
2085
+ image_masked_embeddings=image_masked_embeddings,
2086
+ image_masked_output=flava_masked_output.image_output,
2087
+ text_masked_embeddings=text_masked_embeddings,
2088
+ text_masked_output=flava_masked_output.text_output,
2089
+ multimodal_masked_embeddings=multimodal_masked_embeddings,
2090
+ multimodal_masked_output=flava_masked_output.multimodal_output,
2091
+ mim_logits=mim_logits,
2092
+ mlm_logits=mlm_logits,
2093
+ itm_logits=itm_logits,
2094
+ contrastive_logits_per_image=logits_per_image,
2095
+ contrastive_logits_per_text=logits_per_text,
2096
+ mmm_image_logits=mmm_image_logits,
2097
+ mmm_text_logits=mmm_text_logits,
2098
+ )
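Finally, a hedged end-to-end sketch of the pretraining forward with losses enabled. It uses `return_image_mask=True`, matching the `FlavaProcessor.__call__` signature in the processing file below, so the processor emits `bool_masked_pos` and `codebook_pixel_values`; without explicit `mlm_labels`/`itm_labels`, only the masked-image (MMM-image) and global contrastive terms contribute to the total loss.

```python
# Hedged sketch: inspect the individual pretraining losses on one image-text pair.
from PIL import Image
import requests
from transformers import AutoProcessor, FlavaForPreTraining

model = FlavaForPreTraining.from_pretrained("facebook/flava-full")
processor = AutoProcessor.from_pretrained("facebook/flava-full")

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

inputs = processor(
    images=[image],
    text=["a photo of a cat"],
    return_image_mask=True,       # produces bool_masked_pos for the masked-image objectives
    return_codebook_pixels=True,  # lets the model derive mim_labels via the image codebook
    padding=True,
    max_length=77,
    return_tensors="pt",
)
outputs = model(**inputs, return_loss=True)
print(outputs.loss)       # weighted sum of the non-None terms below
print(outputs.loss_info)  # FlavaLosses(mim=..., mlm=..., itm=..., global_contrastive=..., ...)
```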
venv/lib/python3.10/site-packages/transformers/models/flava/processing_flava.py ADDED
@@ -0,0 +1,165 @@
+ # coding=utf-8
+ # Copyright 2022 Meta Platforms authors and The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """
+ Image/Text processor class for FLAVA
+ """
+
+ import warnings
+ from typing import List, Optional, Union
+
+ from ...image_utils import ImageInput
+ from ...processing_utils import ProcessorMixin
+ from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
+ from ...utils import TensorType
+
+
+ class FlavaProcessor(ProcessorMixin):
+     r"""
+     Constructs a FLAVA processor which wraps a FLAVA image processor and a FLAVA tokenizer into a single processor.
+
+     [`FlavaProcessor`] offers all the functionalities of [`FlavaImageProcessor`] and [`BertTokenizerFast`]. See the
+     [`~FlavaProcessor.__call__`] and [`~FlavaProcessor.decode`] docstrings for more information.
+
+     Args:
+         image_processor ([`FlavaImageProcessor`], *optional*): The image processor is a required input.
+         tokenizer ([`BertTokenizerFast`], *optional*): The tokenizer is a required input.
+     """
+
+     attributes = ["image_processor", "tokenizer"]
+     image_processor_class = "FlavaImageProcessor"
+     tokenizer_class = ("BertTokenizer", "BertTokenizerFast")
+
+     def __init__(self, image_processor=None, tokenizer=None, **kwargs):
+         feature_extractor = None
+         if "feature_extractor" in kwargs:
+             warnings.warn(
+                 "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
+                 " instead.",
+                 FutureWarning,
+             )
+             feature_extractor = kwargs.pop("feature_extractor")
+
+         image_processor = image_processor if image_processor is not None else feature_extractor
+         if image_processor is None:
+             raise ValueError("You need to specify an `image_processor`.")
+         if tokenizer is None:
+             raise ValueError("You need to specify a `tokenizer`.")
+
+         super().__init__(image_processor, tokenizer)
+         self.current_processor = self.image_processor
+
+     def __call__(
+         self,
+         images: Optional[ImageInput] = None,
+         text: Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None,
+         add_special_tokens: bool = True,
+         padding: Union[bool, str, PaddingStrategy] = False,
+         truncation: Union[bool, str, TruncationStrategy] = False,
+         max_length: Optional[int] = None,
+         stride: int = 0,
+         pad_to_multiple_of: Optional[int] = None,
+         return_image_mask: Optional[bool] = None,
+         return_codebook_pixels: Optional[bool] = None,
+         return_token_type_ids: Optional[bool] = None,
+         return_attention_mask: Optional[bool] = None,
+         return_overflowing_tokens: bool = False,
+         return_special_tokens_mask: bool = False,
+         return_offsets_mapping: bool = False,
+         return_length: bool = False,
+         verbose: bool = True,
+         return_tensors: Optional[Union[str, TensorType]] = None,
+         **kwargs,
+     ):
+         """
+         This method uses [`FlavaImageProcessor.__call__`] to prepare image(s) for the model, and
+         [`BertTokenizerFast.__call__`] to prepare text for the model.
+
+         Please refer to the docstrings of the two methods above for more information.
+         """
+
+         if text is None and images is None:
+             raise ValueError("You have to specify either text or images. Both cannot be None.")
+
+         if text is not None:
+             encoding = self.tokenizer(
+                 text=text,
+                 add_special_tokens=add_special_tokens,
+                 padding=padding,
+                 truncation=truncation,
+                 max_length=max_length,
+                 stride=stride,
+                 pad_to_multiple_of=pad_to_multiple_of,
+                 return_token_type_ids=return_token_type_ids,
+                 return_attention_mask=return_attention_mask,
+                 return_overflowing_tokens=return_overflowing_tokens,
+                 return_special_tokens_mask=return_special_tokens_mask,
+                 return_offsets_mapping=return_offsets_mapping,
+                 return_length=return_length,
+                 verbose=verbose,
+                 return_tensors=return_tensors,
+                 **kwargs,
+             )
+         if images is not None:
+             image_features = self.image_processor(
+                 images,
+                 return_image_mask=return_image_mask,
+                 return_codebook_pixels=return_codebook_pixels,
+                 return_tensors=return_tensors,
+                 **kwargs,
+             )
+
+         if text is not None and images is not None:
+             encoding.update(image_features)
+             return encoding
+         elif text is not None:
+             return encoding
+         else:
+             return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
+
+     def batch_decode(self, *args, **kwargs):
+         """
+         This method forwards all its arguments to BertTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please
+         refer to the docstring of this method for more information.
+         """
+         return self.tokenizer.batch_decode(*args, **kwargs)
+
+     def decode(self, *args, **kwargs):
+         """
+         This method forwards all its arguments to BertTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to
+         the docstring of this method for more information.
+         """
+         return self.tokenizer.decode(*args, **kwargs)
+
+     @property
+     def model_input_names(self):
+         tokenizer_input_names = self.tokenizer.model_input_names
+         image_processor_input_names = self.image_processor.model_input_names
+         return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
+
+     @property
+     def feature_extractor_class(self):
+         warnings.warn(
+             "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
+             FutureWarning,
+         )
+         return self.image_processor_class
+
+     @property
+     def feature_extractor(self):
+         warnings.warn(
+             "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
+             FutureWarning,
+         )
+         return self.image_processor
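A short usage sketch for the processor defined above, assuming the facebook/flava-full repository on the Hub (any repo that ships a FlavaImageProcessor config together with a BERT tokenizer behaves the same). It exercises the three branches of __call__: text only, images only, and both merged into a single BatchEncoding:

import numpy as np
from transformers import FlavaProcessor

processor = FlavaProcessor.from_pretrained("facebook/flava-full")  # assumed repo id

image = np.random.randint(0, 255, (224, 224, 3), dtype=np.uint8)  # stand-in image

text_only = processor(text=["a photo of a cat"], return_tensors="pt")   # tokenizer output only
image_only = processor(images=image, return_tensors="pt")               # image features in a BatchEncoding
both = processor(images=image, text=["a photo of a cat"], padding=True, return_tensors="pt")

print(sorted(text_only.keys()))   # input_ids, attention_mask, token_type_ids
print(sorted(image_only.keys()))  # pixel_values (plus masks/codebook pixels when requested)
print(sorted(both.keys()))        # union of the two, since __call__ merges the dicts
print(processor.model_input_names)  # tokenizer names first, then image processor names, deduplicated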
venv/lib/python3.10/site-packages/transformers/models/led/__init__.py ADDED
@@ -0,0 +1,101 @@
+ # Copyright 2021 The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ from typing import TYPE_CHECKING
+
+ from ...utils import (
+     OptionalDependencyNotAvailable,
+     _LazyModule,
+     is_tf_available,
+     is_tokenizers_available,
+     is_torch_available,
+ )
+
+
+ _import_structure = {
+     "configuration_led": ["LED_PRETRAINED_CONFIG_ARCHIVE_MAP", "LEDConfig"],
+     "tokenization_led": ["LEDTokenizer"],
+ }
+
+ try:
+     if not is_tokenizers_available():
+         raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+     pass
+ else:
+     _import_structure["tokenization_led_fast"] = ["LEDTokenizerFast"]
+
+ try:
+     if not is_torch_available():
+         raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+     pass
+ else:
+     _import_structure["modeling_led"] = [
+         "LED_PRETRAINED_MODEL_ARCHIVE_LIST",
+         "LEDForConditionalGeneration",
+         "LEDForQuestionAnswering",
+         "LEDForSequenceClassification",
+         "LEDModel",
+         "LEDPreTrainedModel",
+     ]
+
+
+ try:
+     if not is_tf_available():
+         raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+     pass
+ else:
+     _import_structure["modeling_tf_led"] = ["TFLEDForConditionalGeneration", "TFLEDModel", "TFLEDPreTrainedModel"]
+
+
+ if TYPE_CHECKING:
+     from .configuration_led import LED_PRETRAINED_CONFIG_ARCHIVE_MAP, LEDConfig
+     from .tokenization_led import LEDTokenizer
+
+     try:
+         if not is_tokenizers_available():
+             raise OptionalDependencyNotAvailable()
+     except OptionalDependencyNotAvailable:
+         pass
+     else:
+         from .tokenization_led_fast import LEDTokenizerFast
+
+     try:
+         if not is_torch_available():
+             raise OptionalDependencyNotAvailable()
+     except OptionalDependencyNotAvailable:
+         pass
+     else:
+         from .modeling_led import (
+             LED_PRETRAINED_MODEL_ARCHIVE_LIST,
+             LEDForConditionalGeneration,
+             LEDForQuestionAnswering,
+             LEDForSequenceClassification,
+             LEDModel,
+             LEDPreTrainedModel,
+         )
+
+     try:
+         if not is_tf_available():
+             raise OptionalDependencyNotAvailable()
+     except OptionalDependencyNotAvailable:
+         pass
+     else:
+         from .modeling_tf_led import TFLEDForConditionalGeneration, TFLEDModel, TFLEDPreTrainedModel
+
+ else:
+     import sys
+
+     sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
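The _import_structure/_LazyModule pattern above keeps `import transformers` cheap: LED submodules are imported only when one of their exported names is first accessed, and the tokenizers/torch/TF branches register their classes only when the corresponding backend is installed. A small sketch using standard transformers exports:

import transformers

# Accessing the LED package returns the lazy module registered at the bottom of
# the file above; the heavy submodules have not been imported yet.
led_pkg = transformers.models.led
print(type(led_pkg).__name__)  # _LazyModule

# First use of an exported name triggers the real import of configuration_led.
from transformers import LEDConfig

print(LEDConfig().model_type)  # "led"

# Model classes are only registered when the torch branch above was taken.
if transformers.utils.is_torch_available():
    from transformers import LEDForConditionalGeneration
    print(LEDForConditionalGeneration.__module__)  # transformers.models.led.modeling_led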
venv/lib/python3.10/site-packages/transformers/models/led/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.52 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/led/__pycache__/configuration_led.cpython-310.pyc ADDED
Binary file (6.29 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/led/__pycache__/modeling_led.cpython-310.pyc ADDED
Binary file (91.5 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/led/__pycache__/modeling_tf_led.cpython-310.pyc ADDED
Binary file (76.8 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/led/__pycache__/tokenization_led.cpython-310.pyc ADDED
Binary file (16.1 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/led/__pycache__/tokenization_led_fast.cpython-310.pyc ADDED
Binary file (10.4 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/led/configuration_led.py ADDED
@@ -0,0 +1,165 @@
+ # coding=utf-8
+ # Copyright 2021 Iz Beltagy, Matthew E. Peters, Arman Cohan and The HuggingFace Inc. team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """ LED model configuration"""
+
+ from typing import List, Union
+
+ from ...configuration_utils import PretrainedConfig
+ from ...utils import logging
+
+
+ logger = logging.get_logger(__name__)
+
+
+ from ..deprecated._archive_maps import LED_PRETRAINED_CONFIG_ARCHIVE_MAP  # noqa: F401, E402
+
+
+ class LEDConfig(PretrainedConfig):
+     r"""
+     This is the configuration class to store the configuration of a [`LEDModel`]. It is used to instantiate an LED
+     model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
+     defaults will yield a similar configuration to that of the LED
+     [allenai/led-base-16384](https://huggingface.co/allenai/led-base-16384) architecture.
+
+     Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+     documentation from [`PretrainedConfig`] for more information.
+
+
+     Args:
+         vocab_size (`int`, *optional*, defaults to 50265):
+             Vocabulary size of the LED model. Defines the number of different tokens that can be represented by the
+             `input_ids` passed when calling [`LEDModel`] or [`TFLEDModel`].
+         d_model (`int`, *optional*, defaults to 1024):
+             Dimensionality of the layers and the pooler layer.
+         encoder_layers (`int`, *optional*, defaults to 12):
+             Number of encoder layers.
+         decoder_layers (`int`, *optional*, defaults to 12):
+             Number of decoder layers.
+         encoder_attention_heads (`int`, *optional*, defaults to 16):
+             Number of attention heads for each attention layer in the Transformer encoder.
+         decoder_attention_heads (`int`, *optional*, defaults to 16):
+             Number of attention heads for each attention layer in the Transformer decoder.
+         decoder_ffn_dim (`int`, *optional*, defaults to 4096):
+             Dimensionality of the "intermediate" (often named feed-forward) layer in the decoder.
+         encoder_ffn_dim (`int`, *optional*, defaults to 4096):
+             Dimensionality of the "intermediate" (often named feed-forward) layer in the encoder.
+         activation_function (`str` or `function`, *optional*, defaults to `"gelu"`):
+             The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+             `"relu"`, `"silu"` and `"gelu_new"` are supported.
+         dropout (`float`, *optional*, defaults to 0.1):
+             The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+         attention_dropout (`float`, *optional*, defaults to 0.0):
+             The dropout ratio for the attention probabilities.
+         activation_dropout (`float`, *optional*, defaults to 0.0):
+             The dropout ratio for activations inside the fully connected layer.
+         classifier_dropout (`float`, *optional*, defaults to 0.0):
+             The dropout ratio for the classifier.
+         max_encoder_position_embeddings (`int`, *optional*, defaults to 16384):
+             The maximum sequence length that the encoder might ever be used with.
+         max_decoder_position_embeddings (`int`, *optional*, defaults to 1024):
+             The maximum sequence length that the decoder might ever be used with.
+         init_std (`float`, *optional*, defaults to 0.02):
+             The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+         encoder_layerdrop (`float`, *optional*, defaults to 0.0):
+             The LayerDrop probability for the encoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556)
+             for more details.
+         decoder_layerdrop (`float`, *optional*, defaults to 0.0):
+             The LayerDrop probability for the decoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556)
+             for more details.
+         use_cache (`bool`, *optional*, defaults to `True`):
+             Whether or not the model should return the last key/value attentions (not used by all models).
+
+     Example:
+
+     ```python
+     >>> from transformers import LEDModel, LEDConfig
+
+     >>> # Initializing a LED allenai/led-base-16384 style configuration
+     >>> configuration = LEDConfig()
+
+     >>> # Initializing a model from the allenai/led-base-16384 style configuration
+     >>> model = LEDModel(configuration)
+
+     >>> # Accessing the model configuration
+     >>> configuration = model.config
+     ```"""
+
+     model_type = "led"
+     attribute_map = {
+         "num_attention_heads": "encoder_attention_heads",
+         "hidden_size": "d_model",
+         "attention_probs_dropout_prob": "attention_dropout",
+         "initializer_range": "init_std",
+     }
+
+     def __init__(
+         self,
+         vocab_size=50265,
+         max_encoder_position_embeddings=16384,
+         max_decoder_position_embeddings=1024,
+         encoder_layers=12,
+         encoder_ffn_dim=4096,
+         encoder_attention_heads=16,
+         decoder_layers=12,
+         decoder_ffn_dim=4096,
+         decoder_attention_heads=16,
+         encoder_layerdrop=0.0,
+         decoder_layerdrop=0.0,
+         use_cache=True,
+         is_encoder_decoder=True,
+         activation_function="gelu",
+         d_model=1024,
+         dropout=0.1,
+         attention_dropout=0.0,
+         activation_dropout=0.0,
+         init_std=0.02,
+         decoder_start_token_id=2,
+         classifier_dropout=0.0,
+         pad_token_id=1,
+         bos_token_id=0,
+         eos_token_id=2,
+         attention_window: Union[List[int], int] = 512,
+         **kwargs,
+     ):
+         self.vocab_size = vocab_size
+         self.max_encoder_position_embeddings = max_encoder_position_embeddings
+         self.max_decoder_position_embeddings = max_decoder_position_embeddings
+         self.d_model = d_model
+         self.encoder_ffn_dim = encoder_ffn_dim
+         self.encoder_layers = encoder_layers
+         self.encoder_attention_heads = encoder_attention_heads
+         self.decoder_ffn_dim = decoder_ffn_dim
+         self.decoder_layers = decoder_layers
+         self.decoder_attention_heads = decoder_attention_heads
+         self.dropout = dropout
+         self.attention_dropout = attention_dropout
+         self.activation_dropout = activation_dropout
+         self.activation_function = activation_function
+         self.init_std = init_std
+         self.encoder_layerdrop = encoder_layerdrop
+         self.decoder_layerdrop = decoder_layerdrop
+         self.classifier_dropout = classifier_dropout
+         self.use_cache = use_cache
+         self.num_hidden_layers = encoder_layers
+         self.attention_window = attention_window
+
+         super().__init__(
+             pad_token_id=pad_token_id,
+             bos_token_id=bos_token_id,
+             eos_token_id=eos_token_id,
+             is_encoder_decoder=is_encoder_decoder,
+             decoder_start_token_id=decoder_start_token_id,
+             **kwargs,
+         )
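A quick sketch of how the configuration above behaves in practice: the attribute_map lets the generic names hidden_size, num_attention_heads and initializer_range resolve to LED's own fields, attention_window accepts either a single int or a per-layer list, and the decoder position budget defaults to 1024 even though the encoder accepts up to 16384 positions:

from transformers import LEDConfig

# An int applies one attention window size to every encoder layer; a list can
# specify a different window per layer.
config = LEDConfig(attention_window=256, max_encoder_position_embeddings=4096)

# Generic aliases from attribute_map resolve to the LED-specific attributes.
assert config.hidden_size == config.d_model == 1024
assert config.num_attention_heads == config.encoder_attention_heads == 16
assert config.initializer_range == config.init_std == 0.02

print(config.attention_window)                 # 256
print(config.max_encoder_position_embeddings)  # 4096
print(config.max_decoder_position_embeddings)  # 1024 (default)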
venv/lib/python3.10/site-packages/transformers/models/led/modeling_led.py ADDED
The diff for this file is too large to render. See raw diff