applied-ai-018 committed
Commit f642bd9 · verified · 1 Parent(s): 1bc5244

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes; see the raw diff for the full change set.
Files changed (50)
  1. lm-evaluation-harness/wandb/run-20240514_103053-yu37vekm/files/config.yaml +43 -0
  2. lm-evaluation-harness/wandb/run-20240514_103053-yu37vekm/files/output.log +42 -0
  3. lm-evaluation-harness/wandb/run-20240514_103053-yu37vekm/files/requirements.txt +163 -0
  4. lm-evaluation-harness/wandb/run-20240514_103053-yu37vekm/files/wandb-metadata.json +810 -0
  5. lm-evaluation-harness/wandb/run-20240514_103053-yu37vekm/files/wandb-summary.json +1 -0
  6. lm-evaluation-harness/wandb/run-20240514_103053-yu37vekm/logs/debug-internal.log +181 -0
  7. lm-evaluation-harness/wandb/run-20240514_103053-yu37vekm/logs/debug.log +29 -0
  8. lm-evaluation-harness/wandb/run-20240514_103053-yu37vekm/run-yu37vekm.wandb +0 -0
  9. lm-evaluation-harness/wandb/run-20240522_164547-pxpzv850/run-pxpzv850.wandb +0 -0
  10. lm-evaluation-harness/wandb/run-20240522_184928-kt8p2r8k/logs/debug-internal.log +137 -0
  11. lm-evaluation-harness/wandb/run-20240522_184928-kt8p2r8k/logs/debug.log +28 -0
  12. lm-evaluation-harness/wandb/run-20240523_052210-qlxjd76q/files/config.yaml +43 -0
  13. lm-evaluation-harness/wandb/run-20240523_052210-qlxjd76q/files/output.log +34 -0
  14. lm-evaluation-harness/wandb/run-20240523_052210-qlxjd76q/files/requirements.txt +155 -0
  15. lm-evaluation-harness/wandb/run-20240523_052210-qlxjd76q/files/wandb-metadata.json +850 -0
  16. lm-evaluation-harness/wandb/run-20240523_052210-qlxjd76q/files/wandb-summary.json +1 -0
  17. lm-evaluation-harness/wandb/run-20240523_052210-qlxjd76q/logs/debug-internal.log +183 -0
  18. lm-evaluation-harness/wandb/run-20240523_052210-qlxjd76q/logs/debug.log +29 -0
  19. lm-evaluation-harness/wandb/run-20240523_052210-qlxjd76q/run-qlxjd76q.wandb +0 -0
  20. lm-evaluation-harness/wandb/run-20240523_080141-2cqsvyos/files/config.yaml +43 -0
  21. lm-evaluation-harness/wandb/run-20240523_080141-2cqsvyos/files/output.log +34 -0
  22. lm-evaluation-harness/wandb/run-20240523_080141-2cqsvyos/files/requirements.txt +155 -0
  23. lm-evaluation-harness/wandb/run-20240523_080141-2cqsvyos/files/wandb-metadata.json +850 -0
  24. lm-evaluation-harness/wandb/run-20240523_080141-2cqsvyos/files/wandb-summary.json +1 -0
  25. lm-evaluation-harness/wandb/run-20240523_080141-2cqsvyos/logs/debug-internal.log +185 -0
  26. lm-evaluation-harness/wandb/run-20240523_080141-2cqsvyos/logs/debug.log +29 -0
  27. lm-evaluation-harness/wandb/run-20240523_080141-2cqsvyos/run-2cqsvyos.wandb +0 -0
  28. lm-evaluation-harness/wandb/run-20240608_111026-9apxn9eo/files/config.yaml +375 -0
  29. lm-evaluation-harness/wandb/run-20240608_111026-9apxn9eo/files/media/table/evaluation/eval_results_1_6529e3311149275b8699.table.json +1 -0
  30. lm-evaluation-harness/wandb/run-20240608_111026-9apxn9eo/files/output.log +805 -0
  31. lm-evaluation-harness/wandb/run-20240608_111026-9apxn9eo/files/requirements.txt +154 -0
  32. lm-evaluation-harness/wandb/run-20240608_111026-9apxn9eo/files/wandb-metadata.json +850 -0
  33. lm-evaluation-harness/wandb/run-20240608_111026-9apxn9eo/files/wandb-summary.json +1 -0
  34. lm-evaluation-harness/wandb/run-20240608_111026-9apxn9eo/logs/debug-internal.log +0 -0
  35. lm-evaluation-harness/wandb/run-20240608_111026-9apxn9eo/logs/debug.log +36 -0
  36. venv/lib/python3.10/site-packages/transformers/models/align/__pycache__/configuration_align.cpython-310.pyc +0 -0
  37. venv/lib/python3.10/site-packages/transformers/models/align/__pycache__/convert_align_tf_to_hf.cpython-310.pyc +0 -0
  38. venv/lib/python3.10/site-packages/transformers/models/align/__pycache__/modeling_align.cpython-310.pyc +0 -0
  39. venv/lib/python3.10/site-packages/transformers/models/align/__pycache__/processing_align.cpython-310.pyc +0 -0
  40. venv/lib/python3.10/site-packages/transformers/models/align/configuration_align.py +383 -0
  41. venv/lib/python3.10/site-packages/transformers/models/align/modeling_align.py +1633 -0
  42. venv/lib/python3.10/site-packages/transformers/models/align/processing_align.py +121 -0
  43. venv/lib/python3.10/site-packages/transformers/models/conditional_detr/__pycache__/__init__.cpython-310.pyc +0 -0
  44. venv/lib/python3.10/site-packages/transformers/models/conditional_detr/__pycache__/configuration_conditional_detr.cpython-310.pyc +0 -0
  45. venv/lib/python3.10/site-packages/transformers/models/conditional_detr/__pycache__/convert_conditional_detr_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc +0 -0
  46. venv/lib/python3.10/site-packages/transformers/models/conditional_detr/__pycache__/feature_extraction_conditional_detr.cpython-310.pyc +0 -0
  47. venv/lib/python3.10/site-packages/transformers/models/conditional_detr/__pycache__/image_processing_conditional_detr.cpython-310.pyc +0 -0
  48. venv/lib/python3.10/site-packages/transformers/models/conditional_detr/__pycache__/modeling_conditional_detr.cpython-310.pyc +0 -0
  49. venv/lib/python3.10/site-packages/transformers/models/conditional_detr/configuration_conditional_detr.py +273 -0
  50. venv/lib/python3.10/site-packages/transformers/models/conditional_detr/image_processing_conditional_detr.py +1777 -0
lm-evaluation-harness/wandb/run-20240514_103053-yu37vekm/files/config.yaml ADDED
@@ -0,0 +1,43 @@
+ wandb_version: 1
+
+ _wandb:
+   desc: null
+   value:
+     python_version: 3.10.12
+     cli_version: 0.17.0
+     framework: huggingface
+     huggingface_version: 4.40.2
+     is_jupyter_run: false
+     is_kaggle_kernel: false
+     start_time: 1715682653
+     t:
+       1:
+       - 1
+       - 5
+       - 11
+       - 49
+       - 51
+       - 53
+       - 55
+       - 71
+       - 98
+       - 100
+       2:
+       - 1
+       - 5
+       - 11
+       - 49
+       - 51
+       - 53
+       - 55
+       - 71
+       - 98
+       - 100
+       3:
+       - 23
+       4: 3.10.12
+       5: 0.17.0
+       6: 4.40.2
+       8:
+       - 5
+       13: linux-x86_64
lm-evaluation-harness/wandb/run-20240514_103053-yu37vekm/files/output.log ADDED
@@ -0,0 +1,42 @@
+
+ 2024-05-14:10:30:54,403 INFO [__main__.py:251] Verbosity set to INFO
+ 2024-05-14:10:30:58,879 INFO [__main__.py:335] Selected Tasks: ['indiccopa-hi']
+ 2024-05-14:10:30:58,881 INFO [evaluator.py:131] Setting random seed to 0 | Setting numpy seed to 1234 | Setting torch manual seed to 1234
+ 2024-05-14:10:30:58,881 INFO [evaluator.py:177] Initializing hf model, with arguments: {'pretrained': '/data/cronscript/ckpts//hf_ckpt//global_step120'}
+ Traceback (most recent call last):
+   File "/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py", line 398, in cached_file
+     resolved_file = hf_hub_download(
+   File "/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py", line 106, in _inner_fn
+     validate_repo_id(arg_value)
+   File "/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py", line 154, in validate_repo_id
+     raise HFValidationError(
+ huggingface_hub.errors.HFValidationError: Repo id must be in the form 'repo_name' or 'namespace/repo_name': '/data/cronscript/ckpts//hf_ckpt//global_step120'. Use `repo_type` argument if needed.
+ The above exception was the direct cause of the following exception:
+ Traceback (most recent call last):
+   File "/usr/lib/python3.10/runpy.py", line 196, in _run_module_as_main
+     return _run_code(code, main_globals, None,
+   File "/usr/lib/python3.10/runpy.py", line 86, in _run_code
+     exec(code, run_globals)
+   File "/data/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 417, in <module>
+     cli_evaluate()
+   File "/data/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 341, in cli_evaluate
+     results = evaluator.simple_evaluate(
+   File "/data/cronscript/lm-evaluation-harness/lm_eval/utils.py", line 288, in _wrapper
+     return fn(*args, **kwargs)
+   File "/data/cronscript/lm-evaluation-harness/lm_eval/evaluator.py", line 180, in simple_evaluate
+     lm = lm_eval.api.registry.get_model(model).create_from_arg_string(
+   File "/data/cronscript/lm-evaluation-harness/lm_eval/api/model.py", line 134, in create_from_arg_string
+     return cls(**args, **args2)
+   File "/data/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 190, in __init__
+     self._get_config(
+   File "/data/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 471, in _get_config
+     self._config = transformers.AutoConfig.from_pretrained(
+   File "/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py", line 928, in from_pretrained
+     config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs)
+   File "/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 631, in get_config_dict
+     config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs)
+   File "/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 686, in _get_config_dict
+     resolved_config_file = cached_file(
+   File "/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py", line 462, in cached_file
+     raise EnvironmentError(
+ OSError: Incorrect path_or_model_id: '/data/cronscript/ckpts//hf_ckpt//global_step120'. Please provide either the path to a local folder or the repo_id of a model on the Hub.
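
Note: this failure is why the run lasted only a few seconds. The checkpoint string '/data/cronscript/ckpts//hf_ckpt//global_step120' does not name an existing local directory, so transformers falls back to treating it as a Hub repo id, which a slash-heavy path can never satisfy. A minimal pre-flight guard along these lines would surface the real problem; the helper name and directory layout are illustrative, not part of the harness:

import os

def resolve_checkpoint(ckpt_root: str, step: int) -> str:
    """Build and validate a local checkpoint path before passing it to
    --model_args pretrained=...; from_pretrained() treats any string that
    is not an existing directory as a Hub repo id."""
    # normpath collapses the doubled slashes seen in the log above
    path = os.path.normpath(os.path.join(ckpt_root, "hf_ckpt", f"global_step{step}"))
    if not os.path.isdir(path):
        raise FileNotFoundError(f"checkpoint directory not found: {path}")
    if not os.path.isfile(os.path.join(path, "config.json")):
        raise FileNotFoundError(f"{path} exists but contains no config.json")
    return path

# e.g. resolve_checkpoint("/data/cronscript/ckpts", 120)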
lm-evaluation-harness/wandb/run-20240514_103053-yu37vekm/files/requirements.txt ADDED
@@ -0,0 +1,163 @@
+ DataProperty==1.0.1
+ GitPython==3.1.43
+ Jinja2==3.1.3
+ Markdown==3.6
+ MarkupSafe==2.1.5
+ Pillow-SIMD==7.0.0.post3
+ PyYAML==6.0
+ Werkzeug==3.0.2
+ absl-py==2.1.0
+ accelerate==0.30.1
+ aiohttp==3.9.4
+ aiosignal==1.3.1
+ antlr4-python3-runtime==4.9.3
+ anyio==4.3.0
+ async-timeout==4.0.3
+ attrs==23.2.0
+ av==9.2.0
+ cachetools==5.3.3
+ certifi==2024.2.2
+ cffi==1.15.1
+ cfgv==3.4.0
+ chardet==5.2.0
+ charset-normalizer==3.3.2
+ click==8.1.7
+ cmake==3.29.2
+ colorama==0.4.6
+ datasets==2.19.1
+ deepspeed==0.12.4+hpu.synapse.v1.15.1
+ dill==0.3.8
+ distlib==0.3.8
+ distro==1.9.0
+ docker-pycreds==0.4.0
+ einops==0.8.0
+ evaluate==0.4.2
+ exceptiongroup==1.2.0
+ expecttest==0.2.1
+ filelock==3.13.4
+ frozenlist==1.4.1
+ fsspec==2024.3.1
+ gitdb==4.0.11
+ google-auth-oauthlib==0.4.6
+ google-auth==2.29.0
+ grpcio==1.62.1
+ h11==0.14.0
+ habana-media-loader==1.15.1.15
+ habana-pyhlml==1.15.1.15
+ habana-torch-dataloader==1.15.1.15
+ habana-torch-plugin==1.15.1.15
+ habana_gpu_migration==1.15.1.15
+ habana_quantization_toolkit==1.15.1.15
+ hjson==3.1.0
+ httpcore==1.0.5
+ httpx==0.27.0
+ huggingface-hub==0.23.0
+ identify==2.5.35
+ idna==3.7
+ importlib_resources==6.4.0
+ iniconfig==2.0.0
+ joblib==1.4.2
+ jsonlines==4.0.0
+ lightning-habana==1.4.0
+ lightning-utilities==0.11.2
+ lightning==2.2.0.post0
+ lm_eval==0.3.0
+ lm_eval==0.4.2
+ lm_eval==0.4.2
+ lm_eval==0.4.2
+ mbstrdecoder==1.1.3
+ more-itertools==10.2.0
+ mpi4py==3.1.4
+ mpmath==1.3.0
+ multidict==6.0.5
+ multiprocess==0.70.16
+ networkx==3.3
+ ninja==1.11.1.1
+ nltk==3.8.1
+ nodeenv==1.8.0
+ numexpr==2.10.0
+ numpy==1.23.5
+ oauthlib==3.2.2
+ omegaconf==2.3.0
+ openai==1.29.0
+ packaging==24.0
+ pandas==2.0.1
+ pathspec==0.12.1
+ pathvalidate==3.2.0
+ peft==0.10.0
+ perfetto==0.7.0
+ pip==22.0.2
+ pip==23.3.1
+ platformdirs==4.2.0
+ pluggy==1.4.0
+ portalocker==2.8.2
+ pre-commit==3.3.3
+ protobuf==3.20.3
+ psutil==5.9.8
+ py-cpuinfo==9.0.0
+ pyarrow-hotfix==0.6
+ pyarrow==16.0.0
+ pyasn1==0.6.0
+ pyasn1_modules==0.4.0
+ pybind11==2.10.4
+ pycountry==23.12.11
+ pycparser==2.22
+ pydantic==1.10.13
+ pynvml==8.0.4
+ pytablewriter==1.2.0
+ pytest==8.1.1
+ python-dateutil==2.9.0.post0
+ pytorch-lightning==2.2.2
+ pytz==2024.1
+ regex==2023.5.5
+ requests-oauthlib==2.0.0
+ requests==2.31.0
+ rouge_score==0.1.2
+ rsa==4.9
+ sacrebleu==1.5.0
+ safetensors==0.4.3
+ scikit-learn==1.4.2
+ scipy==1.13.0
+ sentencepiece==0.2.0
+ sentry-sdk==2.1.1
+ setproctitle==1.3.3
+ setuptools==59.6.0
+ setuptools==69.5.1
+ six==1.16.0
+ smmap==5.0.1
+ sniffio==1.3.1
+ sqlitedict==2.1.0
+ symengine==0.11.0
+ sympy==1.12
+ tabledata==1.3.3
+ tcolorpy==0.1.6
+ tdqm==0.0.1
+ tensorboard-data-server==0.6.1
+ tensorboard-plugin-wit==1.8.1
+ tensorboard==2.11.2
+ threadpoolctl==3.5.0
+ tokenizers==0.19.1
+ tomli==2.0.1
+ torch==2.2.0a0+git8964477
+ torch_tb_profiler==0.4.0
+ torchaudio==2.2.0+08901ad
+ torchdata==0.7.1+5e6f7b7
+ torchmetrics==1.3.2
+ torchtext==0.17.0+400da5c
+ torchvision==0.17.0+b2383d4
+ tqdm-multiprocess==0.0.11
+ tqdm==4.66.2
+ transformers==4.40.2
+ typepy==1.3.2
+ typing_extensions==4.11.0
+ tzdata==2024.1
+ urllib3==1.26.18
+ virtualenv==20.25.1
+ wandb==0.17.0
+ wheel==0.37.1
+ wheel==0.43.0
+ word2number==1.1
+ xxhash==3.4.1
+ yamllint==1.35.1
+ yarl==1.9.4
+ zstandard==0.22.0
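
Note: the list above pins lm_eval four times (once at 0.3.0, three times at 0.4.2) and pins pip, setuptools, and wheel twice each; tdqm==0.0.1 also looks like a typo of tqdm, which is already pinned at 4.66.2. A small sketch for flagging such duplicates in any requirements file (the file name is illustrative):

from collections import defaultdict

def duplicate_pins(path: str) -> dict[str, list[str]]:
    """Map package name -> pinned versions, for names pinned more than once."""
    pins = defaultdict(list)
    with open(path) as f:
        for line in f:
            line = line.strip()
            if "==" in line and not line.startswith("#"):
                name, version = line.split("==", 1)
                pins[name.lower()].append(version)
    return {name: vs for name, vs in pins.items() if len(vs) > 1}

# e.g. duplicate_pins("requirements.txt")
# -> {'lm_eval': ['0.3.0', '0.4.2', '0.4.2', '0.4.2'], 'pip': ['22.0.2', '23.3.1'], ...}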
lm-evaluation-harness/wandb/run-20240514_103053-yu37vekm/files/wandb-metadata.json ADDED
@@ -0,0 +1,810 @@
+ {
+   "os": "Linux-5.15.0-92-generic-x86_64-with-glibc2.35",
+   "python": "3.10.12",
+   "heartbeatAt": "2024-05-14T10:30:54.266112",
+   "startedAt": "2024-05-14T10:30:53.886264",
+   "docker": null,
+   "cuda": null,
+   "args": [
+     "--model",
+     "hf",
+     "--model_args",
+     "pretrained=/data/cronscript/ckpts//hf_ckpt//global_step120",
+     "--tasks",
+     "indiccopa-hi",
+     "--batch_size",
+     "auto",
+     "--wandb_args",
+     "project=bharatgpt"
+   ],
+   "state": "running",
+   "program": "-m lm_eval.__main__",
+   "codePathLocal": null,
+   "git": {
+     "remote": "https://github.com/EleutherAI/lm-evaluation-harness",
+     "commit": null
+   },
+   "email": null,
+   "root": "/data/cronscript/lm-evaluation-harness",
+   "host": "vizzhy-150-3",
+   "username": "root",
+   "executable": "/usr/bin/python3",
+   "cpu_count": 76,
+   "cpu_count_logical": 152,
+   "cpu_freq": {
+     "current": 3394.3469736842103,
+     "min": 800.0,
+     "max": 3400.0
+   },
+   "cpu_freq_per_core": [
+     {"current": 3332.668, "min": 800.0, "max": 3400.0},
+     {"current": 3332.543, "min": 800.0, "max": 3400.0},
+     {"current": 3316.868, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3330.683, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3300.0, "min": 800.0, "max": 3400.0},
+     {"current": 3300.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3358.507, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3318.849, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3300.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0}
+   ],
+   "disk": {
+     "/": {
+       "total": 866.4415092468262,
+       "used": 76.92241668701172
+     }
+   },
+   "memory": {
+     "total": 1007.5000267028809
+   }
+ }
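
Note: metadata files like the one above are plain JSON, so a run's environment can be sanity-checked without the wandb UI; a minimal sketch (the path is this run's, adjust as needed):

import json
from statistics import mean

meta_path = "lm-evaluation-harness/wandb/run-20240514_103053-yu37vekm/files/wandb-metadata.json"
with open(meta_path) as f:
    meta = json.load(f)

# Per-core CPU frequency samples recorded at probe time
freqs = [core["current"] for core in meta["cpu_freq_per_core"]]
print(meta["host"], "python", meta["python"], "-", meta["cpu_count_logical"], "logical cores")
print("cli args:", " ".join(meta["args"]))
print(f"cpu MHz: mean={mean(freqs):.1f}, min={min(freqs):.1f}, max={max(freqs):.1f}")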
lm-evaluation-harness/wandb/run-20240514_103053-yu37vekm/files/wandb-summary.json ADDED
@@ -0,0 +1 @@
+ {"_wandb": {"runtime": 5}}
lm-evaluation-harness/wandb/run-20240514_103053-yu37vekm/logs/debug-internal.log ADDED
@@ -0,0 +1,181 @@
+ 2024-05-14 10:30:53,898 INFO StreamThr :8236 [internal.py:wandb_internal():85] W&B internal server running at pid: 8236, started at: 2024-05-14 10:30:53.897695
+ 2024-05-14 10:30:53,900 DEBUG HandlerThread:8236 [handler.py:handle_request():158] handle_request: status
+ 2024-05-14 10:30:53,901 INFO WriterThread:8236 [datastore.py:open_for_write():87] open: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103053-yu37vekm/run-yu37vekm.wandb
+ 2024-05-14 10:30:53,901 DEBUG SenderThread:8236 [sender.py:send():378] send: header
+ 2024-05-14 10:30:53,911 DEBUG SenderThread:8236 [sender.py:send():378] send: run
+ 2024-05-14 10:30:54,125 INFO SenderThread:8236 [dir_watcher.py:__init__():211] watching files in: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103053-yu37vekm/files
+ 2024-05-14 10:30:54,125 INFO SenderThread:8236 [sender.py:_start_run_threads():1123] run started: yu37vekm with start time 1715682653.897371
+ 2024-05-14 10:30:54,132 DEBUG HandlerThread:8236 [handler.py:handle_request():158] handle_request: check_version
+ 2024-05-14 10:30:54,132 DEBUG SenderThread:8236 [sender.py:send_request():405] send_request: check_version
+ 2024-05-14 10:30:54,215 DEBUG HandlerThread:8236 [handler.py:handle_request():158] handle_request: run_start
+ 2024-05-14 10:30:54,216 DEBUG HandlerThread:8236 [system_info.py:__init__():26] System info init
+ 2024-05-14 10:30:54,216 DEBUG HandlerThread:8236 [system_info.py:__init__():41] System info init done
+ 2024-05-14 10:30:54,216 INFO HandlerThread:8236 [system_monitor.py:start():194] Starting system monitor
+ 2024-05-14 10:30:54,217 INFO SystemMonitor:8236 [system_monitor.py:_start():158] Starting system asset monitoring threads
+ 2024-05-14 10:30:54,217 INFO HandlerThread:8236 [system_monitor.py:probe():214] Collecting system info
+ 2024-05-14 10:30:54,217 INFO SystemMonitor:8236 [interfaces.py:start():188] Started cpu monitoring
+ 2024-05-14 10:30:54,217 INFO SystemMonitor:8236 [interfaces.py:start():188] Started disk monitoring
+ 2024-05-14 10:30:54,218 INFO SystemMonitor:8236 [interfaces.py:start():188] Started memory monitoring
+ 2024-05-14 10:30:54,218 INFO SystemMonitor:8236 [interfaces.py:start():188] Started network monitoring
+ 2024-05-14 10:30:54,266 DEBUG HandlerThread:8236 [system_info.py:probe():150] Probing system
+ 2024-05-14 10:30:54,274 DEBUG HandlerThread:8236 [system_info.py:_probe_git():135] Probing git
+ 2024-05-14 10:30:54,299 ERROR HandlerThread:8236 [gitlib.py:root():92] git root error: Cmd('git') failed due to: exit code(128)
+   cmdline: git rev-parse --show-toplevel
+   stderr: 'fatal: detected dubious ownership in repository at '/data/cronscript/lm-evaluation-harness'
+ To add an exception for this directory, call:
+
+ 	git config --global --add safe.directory /data/cronscript/lm-evaluation-harness'
+ 2024-05-14 10:30:54,299 DEBUG HandlerThread:8236 [system_info.py:_probe_git():143] Probing git done
+ 2024-05-14 10:30:54,300 DEBUG HandlerThread:8236 [system_info.py:probe():198] Probing system done
+ 2024-05-14 10:30:54,300 DEBUG HandlerThread:8236 [system_monitor.py:probe():223] {'os': 'Linux-5.15.0-92-generic-x86_64-with-glibc2.35', 'python': '3.10.12', 'heartbeatAt': '2024-05-14T10:30:54.266112', 'startedAt': '2024-05-14T10:30:53.886264', 'docker': None, 'cuda': None, 'args': ('--model', 'hf', '--model_args', 'pretrained=/data/cronscript/ckpts//hf_ckpt//global_step120', '--tasks', 'indiccopa-hi', '--batch_size', 'auto', '--wandb_args', 'project=bharatgpt'), 'state': 'running', 'program': '-m lm_eval.__main__', 'codePathLocal': None, 'git': {'remote': 'https://github.com/EleutherAI/lm-evaluation-harness', 'commit': None}, 'email': None, 'root': '/data/cronscript/lm-evaluation-harness', 'host': 'vizzhy-150-3', 'username': 'root', 'executable': '/usr/bin/python3', 'cpu_count': 76, 'cpu_count_logical': 152, 'cpu_freq': {'current': 3394.3469736842103, 'min': 800.0, 'max': 3400.0}, 'cpu_freq_per_core': [{'current': 3332.668, 'min': 800.0, 'max': 3400.0}, {'current': 3332.543, 'min': 800.0, 'max': 3400.0}, {'current': 3316.868, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3330.683, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3358.507, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 
'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3318.849, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 
'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}], 'disk': {'/': {'total': 866.4415092468262, 'used': 76.92241668701172}}, 'memory': {'total': 1007.5000267028809}}
+ 2024-05-14 10:30:54,300 INFO HandlerThread:8236 [system_monitor.py:probe():224] Finished collecting system info
+ 2024-05-14 10:30:54,300 INFO HandlerThread:8236 [system_monitor.py:probe():227] Publishing system info
+ 2024-05-14 10:30:54,301 INFO HandlerThread:8236 [system_monitor.py:probe():229] Finished publishing system info
+ 2024-05-14 10:30:54,305 DEBUG SenderThread:8236 [sender.py:send():378] send: files
+ 2024-05-14 10:30:54,305 INFO SenderThread:8236 [sender.py:_save_file():1389] saving file wandb-metadata.json with policy now
+ 2024-05-14 10:30:54,400 DEBUG HandlerThread:8236 [handler.py:handle_request():158] handle_request: python_packages
+ 2024-05-14 10:30:54,400 DEBUG HandlerThread:8236 [handler.py:handle_request():158] handle_request: stop_status
+ 2024-05-14 10:30:54,400 DEBUG SenderThread:8236 [sender.py:send_request():405] send_request: python_packages
+ 2024-05-14 10:30:54,401 DEBUG SenderThread:8236 [sender.py:send_request():405] send_request: stop_status
+ 2024-05-14 10:30:54,631 DEBUG SenderThread:8236 [sender.py:send():378] send: telemetry
+ 2024-05-14 10:30:54,812 INFO wandb-upload_0:8236 [upload_job.py:push():130] Uploaded file /tmp/tmp7gniqj8owandb/oxues1px-wandb-metadata.json
+ 2024-05-14 10:30:55,127 INFO Thread-12 :8236 [dir_watcher.py:_on_file_created():271] file/dir created: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103053-yu37vekm/files/output.log
+ 2024-05-14 10:30:55,127 INFO Thread-12 :8236 [dir_watcher.py:_on_file_created():271] file/dir created: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103053-yu37vekm/files/requirements.txt
+ 2024-05-14 10:30:55,127 INFO Thread-12 :8236 [dir_watcher.py:_on_file_created():271] file/dir created: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103053-yu37vekm/files/wandb-metadata.json
+ 2024-05-14 10:30:57,127 INFO Thread-12 :8236 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103053-yu37vekm/files/output.log
+ 2024-05-14 10:30:59,882 DEBUG HandlerThread:8236 [handler.py:handle_request():158] handle_request: status_report
+ 2024-05-14 10:30:59,949 DEBUG SenderThread:8236 [sender.py:send():378] send: exit
+ 2024-05-14 10:30:59,949 INFO SenderThread:8236 [sender.py:send_exit():585] handling exit code: 1
+ 2024-05-14 10:30:59,949 INFO SenderThread:8236 [sender.py:send_exit():587] handling runtime: 5
+ 2024-05-14 10:30:59,950 INFO SenderThread:8236 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end
+ 2024-05-14 10:30:59,950 INFO SenderThread:8236 [sender.py:send_exit():593] send defer
+ 2024-05-14 10:30:59,951 DEBUG HandlerThread:8236 [handler.py:handle_request():158] handle_request: defer
+ 2024-05-14 10:30:59,951 INFO HandlerThread:8236 [handler.py:handle_request_defer():184] handle defer: 0
+ 2024-05-14 10:30:59,951 DEBUG SenderThread:8236 [sender.py:send_request():405] send_request: defer
+ 2024-05-14 10:30:59,951 INFO SenderThread:8236 [sender.py:send_request_defer():609] handle sender defer: 0
+ 2024-05-14 10:30:59,951 INFO SenderThread:8236 [sender.py:transition_state():613] send defer: 1
+ 2024-05-14 10:30:59,951 DEBUG HandlerThread:8236 [handler.py:handle_request():158] handle_request: defer
+ 2024-05-14 10:30:59,951 INFO HandlerThread:8236 [handler.py:handle_request_defer():184] handle defer: 1
+ 2024-05-14 10:30:59,951 DEBUG SenderThread:8236 [sender.py:send_request():405] send_request: defer
+ 2024-05-14 10:30:59,951 INFO SenderThread:8236 [sender.py:send_request_defer():609] handle sender defer: 1
+ 2024-05-14 10:30:59,951 INFO SenderThread:8236 [sender.py:transition_state():613] send defer: 2
+ 2024-05-14 10:30:59,951 DEBUG HandlerThread:8236 [handler.py:handle_request():158] handle_request: defer
+ 2024-05-14 10:30:59,951 INFO HandlerThread:8236 [handler.py:handle_request_defer():184] handle defer: 2
+ 2024-05-14 10:30:59,951 INFO HandlerThread:8236 [system_monitor.py:finish():203] Stopping system monitor
+ 2024-05-14 10:30:59,952 INFO HandlerThread:8236 [interfaces.py:finish():200] Joined cpu monitor
+ 2024-05-14 10:30:59,952 DEBUG SystemMonitor:8236 [system_monitor.py:_start():172] Starting system metrics aggregation loop
+ 2024-05-14 10:30:59,952 INFO HandlerThread:8236 [interfaces.py:finish():200] Joined disk monitor
+ 2024-05-14 10:30:59,952 DEBUG SystemMonitor:8236 [system_monitor.py:_start():179] Finished system metrics aggregation loop
+ 2024-05-14 10:30:59,952 INFO HandlerThread:8236 [interfaces.py:finish():200] Joined memory monitor
+ 2024-05-14 10:30:59,952 DEBUG SystemMonitor:8236 [system_monitor.py:_start():183] Publishing last batch of metrics
+ 2024-05-14 10:30:59,952 INFO HandlerThread:8236 [interfaces.py:finish():200] Joined network monitor
+ 2024-05-14 10:30:59,954 DEBUG SenderThread:8236 [sender.py:send_request():405] send_request: defer
+ 2024-05-14 10:30:59,954 INFO SenderThread:8236 [sender.py:send_request_defer():609] handle sender defer: 2
+ 2024-05-14 10:30:59,954 INFO SenderThread:8236 [sender.py:transition_state():613] send defer: 3
+ 2024-05-14 10:30:59,954 DEBUG HandlerThread:8236 [handler.py:handle_request():158] handle_request: defer
+ 2024-05-14 10:30:59,954 DEBUG SenderThread:8236 [sender.py:send():378] send: stats
+ 2024-05-14 10:30:59,954 INFO HandlerThread:8236 [handler.py:handle_request_defer():184] handle defer: 3
+ 2024-05-14 10:30:59,955 DEBUG SenderThread:8236 [sender.py:send_request():405] send_request: defer
+ 2024-05-14 10:30:59,955 INFO SenderThread:8236 [sender.py:send_request_defer():609] handle sender defer: 3
+ 2024-05-14 10:30:59,955 INFO SenderThread:8236 [sender.py:transition_state():613] send defer: 4
+ 2024-05-14 10:30:59,955 DEBUG HandlerThread:8236 [handler.py:handle_request():158] handle_request: defer
+ 2024-05-14 10:30:59,955 INFO HandlerThread:8236 [handler.py:handle_request_defer():184] handle defer: 4
+ 2024-05-14 10:30:59,955 DEBUG SenderThread:8236 [sender.py:send_request():405] send_request: defer
+ 2024-05-14 10:30:59,955 INFO SenderThread:8236 [sender.py:send_request_defer():609] handle sender defer: 4
+ 2024-05-14 10:30:59,955 INFO SenderThread:8236 [sender.py:transition_state():613] send defer: 5
+ 2024-05-14 10:30:59,955 DEBUG HandlerThread:8236 [handler.py:handle_request():158] handle_request: defer
+ 2024-05-14 10:30:59,955 INFO HandlerThread:8236 [handler.py:handle_request_defer():184] handle defer: 5
+ 2024-05-14 10:30:59,956 DEBUG SenderThread:8236 [sender.py:send():378] send: summary
+ 2024-05-14 10:30:59,956 INFO SenderThread:8236 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end
+ 2024-05-14 10:30:59,956 DEBUG SenderThread:8236 [sender.py:send_request():405] send_request: defer
+ 2024-05-14 10:30:59,956 INFO SenderThread:8236 [sender.py:send_request_defer():609] handle sender defer: 5
+ 2024-05-14 10:30:59,956 INFO SenderThread:8236 [sender.py:transition_state():613] send defer: 6
+ 2024-05-14 10:30:59,957 DEBUG HandlerThread:8236 [handler.py:handle_request():158] handle_request: defer
+ 2024-05-14 10:30:59,957 INFO HandlerThread:8236 [handler.py:handle_request_defer():184] handle defer: 6
+ 2024-05-14 10:30:59,957 DEBUG SenderThread:8236 [sender.py:send_request():405] send_request: defer
+ 2024-05-14 10:30:59,957 INFO SenderThread:8236 [sender.py:send_request_defer():609] handle sender defer: 6
+ 2024-05-14 10:30:59,959 DEBUG HandlerThread:8236 [handler.py:handle_request():158] handle_request: status_report
+ 2024-05-14 10:31:00,025 INFO SenderThread:8236 [sender.py:transition_state():613] send defer: 7
+ 2024-05-14 10:31:00,026 DEBUG HandlerThread:8236 [handler.py:handle_request():158] handle_request: defer
+ 2024-05-14 10:31:00,026 INFO HandlerThread:8236 [handler.py:handle_request_defer():184] handle defer: 7
+ 2024-05-14 10:31:00,026 DEBUG SenderThread:8236 [sender.py:send_request():405] send_request: defer
+ 2024-05-14 10:31:00,026 INFO SenderThread:8236 [sender.py:send_request_defer():609] handle sender defer: 7
+ 2024-05-14 10:31:00,129 INFO Thread-12 :8236 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103053-yu37vekm/files/config.yaml
+ 2024-05-14 10:31:00,129 INFO Thread-12 :8236 [dir_watcher.py:_on_file_created():271] file/dir created: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103053-yu37vekm/files/wandb-summary.json
+ 2024-05-14 10:31:00,642 INFO SenderThread:8236 [sender.py:transition_state():613] send defer: 8
+ 2024-05-14 10:31:00,642 DEBUG HandlerThread:8236 [handler.py:handle_request():158] handle_request: defer
+ 2024-05-14 10:31:00,643 INFO HandlerThread:8236 [handler.py:handle_request_defer():184] handle defer: 8
+ 2024-05-14 10:31:00,643 DEBUG SenderThread:8236 [sender.py:send_request():405] send_request: defer
+ 2024-05-14 10:31:00,643 INFO SenderThread:8236 [sender.py:send_request_defer():609] handle sender defer: 8
+ 2024-05-14 10:31:00,643 INFO SenderThread:8236 [job_builder.py:build():432] Attempting to build job artifact
+ 2024-05-14 10:31:00,643 INFO SenderThread:8236 [job_builder.py:_get_source_type():576] no source found
+ 2024-05-14 10:31:00,643 INFO SenderThread:8236 [sender.py:transition_state():613] send defer: 9
+ 2024-05-14 10:31:00,644 DEBUG HandlerThread:8236 [handler.py:handle_request():158] handle_request: defer
+ 2024-05-14 10:31:00,644 INFO HandlerThread:8236 [handler.py:handle_request_defer():184] handle defer: 9
+ 2024-05-14 10:31:00,644 DEBUG SenderThread:8236 [sender.py:send_request():405] send_request: defer
+ 2024-05-14 10:31:00,644 INFO SenderThread:8236 [sender.py:send_request_defer():609] handle sender defer: 9
+ 2024-05-14 10:31:00,644 INFO SenderThread:8236 [dir_watcher.py:finish():358] shutting down directory watcher
+ 2024-05-14 10:31:00,949 DEBUG HandlerThread:8236 [handler.py:handle_request():158] handle_request: poll_exit
+ 2024-05-14 10:31:01,130 INFO SenderThread:8236 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103053-yu37vekm/files/output.log
+ 2024-05-14 10:31:01,130 INFO SenderThread:8236 [dir_watcher.py:finish():388] scan: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103053-yu37vekm/files
+ 2024-05-14 10:31:01,130 INFO SenderThread:8236 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103053-yu37vekm/files/config.yaml config.yaml
+ 2024-05-14 10:31:01,130 INFO SenderThread:8236 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103053-yu37vekm/files/requirements.txt requirements.txt
+ 2024-05-14 10:31:01,130 INFO SenderThread:8236 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103053-yu37vekm/files/output.log output.log
+ 2024-05-14 10:31:01,130 INFO SenderThread:8236 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103053-yu37vekm/files/wandb-summary.json wandb-summary.json
+ 2024-05-14 10:31:01,130 INFO SenderThread:8236 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103053-yu37vekm/files/wandb-metadata.json wandb-metadata.json
+ 2024-05-14 10:31:01,131 INFO SenderThread:8236 [sender.py:transition_state():613] send defer: 10
+ 2024-05-14 10:31:01,131 DEBUG SenderThread:8236 [sender.py:send_request():405] send_request: poll_exit
+ 2024-05-14 10:31:01,131 DEBUG HandlerThread:8236 [handler.py:handle_request():158] handle_request: defer
+ 2024-05-14 10:31:01,131 INFO HandlerThread:8236 [handler.py:handle_request_defer():184] handle defer: 10
+ 2024-05-14 10:31:01,131 DEBUG SenderThread:8236 [sender.py:send_request():405] send_request: defer
+ 2024-05-14 10:31:01,131 INFO SenderThread:8236 [sender.py:send_request_defer():609] handle sender defer: 10
+ 2024-05-14 10:31:01,131 INFO SenderThread:8236 [file_pusher.py:finish():169] shutting down file pusher
+ 2024-05-14 10:31:01,406 INFO wandb-upload_0:8236 [upload_job.py:push():130] Uploaded file /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103053-yu37vekm/files/requirements.txt
+ 2024-05-14 10:31:01,539 INFO wandb-upload_1:8236 [upload_job.py:push():130] Uploaded file /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103053-yu37vekm/files/config.yaml
+ 2024-05-14 10:31:01,609 INFO wandb-upload_2:8236 [upload_job.py:push():130] Uploaded file /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103053-yu37vekm/files/output.log
+ 2024-05-14 10:31:01,610 INFO wandb-upload_3:8236 [upload_job.py:push():130] Uploaded file /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103053-yu37vekm/files/wandb-summary.json
+ 2024-05-14 10:31:01,810 INFO Thread-11 (_thread_body):8236 [sender.py:transition_state():613] send defer: 11
+ 2024-05-14 10:31:01,811 DEBUG HandlerThread:8236 [handler.py:handle_request():158] handle_request: defer
+ 2024-05-14 10:31:01,811 INFO HandlerThread:8236 [handler.py:handle_request_defer():184] handle defer: 11
+ 2024-05-14 10:31:01,811 DEBUG SenderThread:8236 [sender.py:send_request():405] send_request: defer
+ 2024-05-14 10:31:01,812 INFO SenderThread:8236 [sender.py:send_request_defer():609] handle sender defer: 11
+ 2024-05-14 10:31:01,812 INFO SenderThread:8236 [file_pusher.py:join():175] waiting for file pusher
+ 2024-05-14 10:31:01,812 INFO SenderThread:8236 [sender.py:transition_state():613] send defer: 12
+ 2024-05-14 10:31:01,812 DEBUG HandlerThread:8236 [handler.py:handle_request():158] handle_request: defer
+ 2024-05-14 10:31:01,812 INFO HandlerThread:8236 [handler.py:handle_request_defer():184] handle defer: 12
+ 2024-05-14 10:31:01,812 DEBUG SenderThread:8236 [sender.py:send_request():405] send_request: defer
+ 2024-05-14 10:31:01,812 INFO SenderThread:8236 [sender.py:send_request_defer():609] handle sender defer: 12
+ 2024-05-14 10:31:01,812 INFO SenderThread:8236 [file_stream.py:finish():601] file stream finish called
+ 2024-05-14 10:31:01,949 DEBUG HandlerThread:8236 [handler.py:handle_request():158] handle_request: poll_exit
+ 2024-05-14 10:31:02,042 INFO SenderThread:8236 [file_stream.py:finish():605] file stream finish is done
+ 2024-05-14 10:31:02,042 INFO SenderThread:8236 [sender.py:transition_state():613] send defer: 13
+ 2024-05-14 10:31:02,042 DEBUG SenderThread:8236 [sender.py:send_request():405] send_request: poll_exit
+ 2024-05-14 10:31:02,042 DEBUG HandlerThread:8236 [handler.py:handle_request():158] handle_request: defer
+ 2024-05-14 10:31:02,042 INFO HandlerThread:8236 [handler.py:handle_request_defer():184] handle defer: 13
+ 2024-05-14 10:31:02,042 DEBUG SenderThread:8236 [sender.py:send_request():405] send_request: defer
+ 2024-05-14 10:31:02,042 INFO SenderThread:8236 [sender.py:send_request_defer():609] handle sender defer: 13
+ 2024-05-14 10:31:02,042 INFO SenderThread:8236 [sender.py:transition_state():613] send defer: 14
+ 2024-05-14 10:31:02,042 DEBUG HandlerThread:8236 [handler.py:handle_request():158] handle_request: defer
+ 2024-05-14 10:31:02,043 INFO HandlerThread:8236 [handler.py:handle_request_defer():184] handle defer: 14
+ 2024-05-14 10:31:02,043 DEBUG SenderThread:8236 [sender.py:send():378] send: final
+ 2024-05-14 10:31:02,043 DEBUG SenderThread:8236 [sender.py:send():378] send: footer
+ 2024-05-14 10:31:02,043 DEBUG SenderThread:8236 [sender.py:send_request():405] send_request: defer
+ 2024-05-14 10:31:02,043 INFO SenderThread:8236 [sender.py:send_request_defer():609] handle sender defer: 14
+ 2024-05-14 10:31:02,043 DEBUG HandlerThread:8236 [handler.py:handle_request():158] handle_request: poll_exit
+ 2024-05-14 10:31:02,044 DEBUG SenderThread:8236 [sender.py:send_request():405] send_request: poll_exit
+ 2024-05-14 10:31:02,044 DEBUG HandlerThread:8236 [handler.py:handle_request():158] handle_request: poll_exit
+ 2024-05-14 10:31:02,044 DEBUG HandlerThread:8236 [handler.py:handle_request():158] handle_request: server_info
+ 2024-05-14 10:31:02,044 DEBUG SenderThread:8236 [sender.py:send_request():405] send_request: poll_exit
+ 2024-05-14 10:31:02,044 DEBUG HandlerThread:8236 [handler.py:handle_request():158] handle_request: get_summary
+ 2024-05-14 10:31:02,044 DEBUG SenderThread:8236 [sender.py:send_request():405] send_request: server_info
+ 2024-05-14 10:31:02,044 DEBUG HandlerThread:8236 [handler.py:handle_request():158] handle_request: sampled_history
+ 2024-05-14 10:31:02,046 DEBUG HandlerThread:8236 [handler.py:handle_request():158] handle_request: internal_messages
+ 2024-05-14 10:31:02,099 INFO MainThread:8236 [wandb_run.py:_footer_history_summary_info():3994] rendering history
+ 2024-05-14 10:31:02,099 INFO MainThread:8236 [wandb_run.py:_footer_history_summary_info():4026] rendering summary
+ 2024-05-14 10:31:02,099 INFO MainThread:8236 [wandb_run.py:_footer_sync_info():3953] logging synced files
+ 2024-05-14 10:31:02,099 DEBUG HandlerThread:8236 [handler.py:handle_request():158] handle_request: shutdown
+ 2024-05-14 10:31:02,099 INFO HandlerThread:8236 [handler.py:finish():882] shutting down handler
+ 2024-05-14 10:31:03,044 INFO WriterThread:8236 [datastore.py:close():296] close: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103053-yu37vekm/run-yu37vekm.wandb
+ 2024-05-14 10:31:03,099 INFO SenderThread:8236 [sender.py:finish():1545] shutting down sender
+ 2024-05-14 10:31:03,099 INFO SenderThread:8236 [file_pusher.py:finish():169] shutting down file pusher
+ 2024-05-14 10:31:03,099 INFO SenderThread:8236 [file_pusher.py:join():175] waiting for file pusher
lm-evaluation-harness/wandb/run-20240514_103053-yu37vekm/logs/debug.log ADDED
@@ -0,0 +1,29 @@
+ 2024-05-14 10:30:53,894 INFO MainThread:6969 [wandb_setup.py:_flush():76] Current SDK version is 0.17.0
+ 2024-05-14 10:30:53,894 INFO MainThread:6969 [wandb_setup.py:_flush():76] Configure stats pid to 6969
+ 2024-05-14 10:30:53,894 INFO MainThread:6969 [wandb_setup.py:_flush():76] Loading settings from /root/.config/wandb/settings
+ 2024-05-14 10:30:53,894 INFO MainThread:6969 [wandb_setup.py:_flush():76] Loading settings from /data/cronscript/lm-evaluation-harness/wandb/settings
+ 2024-05-14 10:30:53,894 INFO MainThread:6969 [wandb_setup.py:_flush():76] Loading settings from environment variables: {}
+ 2024-05-14 10:30:53,894 INFO MainThread:6969 [wandb_setup.py:_flush():76] Applying setup settings: {'_disable_service': False}
+ 2024-05-14 10:30:53,894 WARNING MainThread:6969 [wandb_setup.py:_flush():76] Could not find program at -m lm_eval.__main__
+ 2024-05-14 10:30:53,894 INFO MainThread:6969 [wandb_setup.py:_flush():76] Inferring run settings from compute environment: {'program_relpath': None, 'program': '-m lm_eval.__main__'}
+ 2024-05-14 10:30:53,894 INFO MainThread:6969 [wandb_setup.py:_flush():76] Applying login settings: {}
+ 2024-05-14 10:30:53,894 INFO MainThread:6969 [wandb_init.py:_log_setup():520] Logging user logs to /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103053-yu37vekm/logs/debug.log
+ 2024-05-14 10:30:53,894 INFO MainThread:6969 [wandb_init.py:_log_setup():521] Logging internal logs to /data/cronscript/lm-evaluation-harness/wandb/run-20240514_103053-yu37vekm/logs/debug-internal.log
+ 2024-05-14 10:30:53,894 INFO MainThread:6969 [wandb_init.py:init():560] calling init triggers
+ 2024-05-14 10:30:53,894 INFO MainThread:6969 [wandb_init.py:init():567] wandb.init called with sweep_config: {}
+ config: {}
+ 2024-05-14 10:30:53,895 INFO MainThread:6969 [wandb_init.py:init():610] starting backend
+ 2024-05-14 10:30:53,895 INFO MainThread:6969 [wandb_init.py:init():614] setting up manager
+ 2024-05-14 10:30:53,896 INFO MainThread:6969 [backend.py:_multiprocessing_setup():105] multiprocessing start_methods=fork,spawn,forkserver, using: spawn
+ 2024-05-14 10:30:53,897 INFO MainThread:6969 [wandb_init.py:init():622] backend started and connected
+ 2024-05-14 10:30:53,899 INFO MainThread:6969 [wandb_init.py:init():711] updated telemetry
+ 2024-05-14 10:30:53,910 INFO MainThread:6969 [wandb_init.py:init():744] communicating run to backend with 90.0 second timeout
+ 2024-05-14 10:30:54,131 INFO MainThread:6969 [wandb_run.py:_on_init():2396] communicating current version
+ 2024-05-14 10:30:54,210 INFO MainThread:6969 [wandb_run.py:_on_init():2405] got version response
+ 2024-05-14 10:30:54,211 INFO MainThread:6969 [wandb_init.py:init():795] starting run threads in backend
+ 2024-05-14 10:30:54,400 INFO MainThread:6969 [wandb_run.py:_console_start():2374] atexit reg
+ 2024-05-14 10:30:54,400 INFO MainThread:6969 [wandb_run.py:_redirect():2229] redirect: wrap_raw
+ 2024-05-14 10:30:54,400 INFO MainThread:6969 [wandb_run.py:_redirect():2294] Wrapping output streams.
+ 2024-05-14 10:30:54,401 INFO MainThread:6969 [wandb_run.py:_redirect():2319] Redirects installed.
+ 2024-05-14 10:30:54,402 INFO MainThread:6969 [wandb_init.py:init():838] run started, returning control to user process
+ 2024-05-14 10:31:03,100 WARNING MsgRouterThr:6969 [router.py:message_loop():77] message_loop has been closed
lm-evaluation-harness/wandb/run-20240514_103053-yu37vekm/run-yu37vekm.wandb ADDED
Binary file (12.1 kB).
lm-evaluation-harness/wandb/run-20240522_164547-pxpzv850/run-pxpzv850.wandb ADDED
Binary file (7.9 kB).
lm-evaluation-harness/wandb/run-20240522_184928-kt8p2r8k/logs/debug-internal.log ADDED
@@ -0,0 +1,137 @@
+ 2024-05-22 18:49:28,214 INFO StreamThr :1570 [internal.py:wandb_internal():85] W&B internal server running at pid: 1570, started at: 2024-05-22 18:49:28.210056
+ 2024-05-22 18:49:28,216 DEBUG HandlerThread:1570 [handler.py:handle_request():158] handle_request: status
+ 2024-05-22 18:49:28,219 INFO WriterThread:1570 [datastore.py:open_for_write():87] open: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184928-kt8p2r8k/run-kt8p2r8k.wandb
+ 2024-05-22 18:49:28,221 DEBUG SenderThread:1570 [sender.py:send():378] send: header
+ 2024-05-22 18:49:28,223 DEBUG SenderThread:1570 [sender.py:send():378] send: run
+ 2024-05-22 18:49:28,482 INFO SenderThread:1570 [dir_watcher.py:__init__():211] watching files in: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184928-kt8p2r8k/files
+ 2024-05-22 18:49:28,482 INFO SenderThread:1570 [sender.py:_start_run_threads():1123] run started: kt8p2r8k with start time 1716403768.210122
+ 2024-05-22 18:49:28,483 DEBUG HandlerThread:1570 [handler.py:handle_request():158] handle_request: check_version
+ 2024-05-22 18:49:28,484 DEBUG SenderThread:1570 [sender.py:send_request():405] send_request: check_version
+ 2024-05-22 18:49:28,640 DEBUG HandlerThread:1570 [handler.py:handle_request():158] handle_request: run_start
+ 2024-05-22 18:49:28,642 DEBUG HandlerThread:1570 [system_info.py:__init__():26] System info init
+ 2024-05-22 18:49:28,643 DEBUG HandlerThread:1570 [system_info.py:__init__():41] System info init done
+ 2024-05-22 18:49:28,643 INFO HandlerThread:1570 [system_monitor.py:start():194] Starting system monitor
+ 2024-05-22 18:49:28,643 INFO SystemMonitor:1570 [system_monitor.py:_start():158] Starting system asset monitoring threads
+ 2024-05-22 18:49:28,643 INFO HandlerThread:1570 [system_monitor.py:probe():214] Collecting system info
+ 2024-05-22 18:49:28,650 INFO SystemMonitor:1570 [interfaces.py:start():188] Started cpu monitoring
+ 2024-05-22 18:49:28,650 INFO SystemMonitor:1570 [interfaces.py:start():188] Started disk monitoring
+ 2024-05-22 18:49:28,652 INFO SystemMonitor:1570 [interfaces.py:start():188] Started memory monitoring
+ 2024-05-22 18:49:28,652 INFO SystemMonitor:1570 [interfaces.py:start():188] Started network monitoring
+ 2024-05-22 18:49:28,717 DEBUG HandlerThread:1570 [system_info.py:probe():150] Probing system
+ 2024-05-22 18:49:28,720 DEBUG HandlerThread:1570 [system_info.py:_probe_git():135] Probing git
+ 2024-05-22 18:49:28,730 ERROR HandlerThread:1570 [gitlib.py:root():92] git root error: Cmd('git') failed due to: exit code(128)
+ cmdline: git rev-parse --show-toplevel
+ stderr: 'fatal: detected dubious ownership in repository at '/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness'
+ To add an exception for this directory, call:
+
+ git config --global --add safe.directory /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness'
+ 2024-05-22 18:49:28,730 DEBUG HandlerThread:1570 [system_info.py:_probe_git():143] Probing git done
+ 2024-05-22 18:49:28,730 DEBUG HandlerThread:1570 [system_info.py:probe():198] Probing system done
+ 2024-05-22 18:49:28,730 DEBUG HandlerThread:1570 [system_monitor.py:probe():223] {'os': 'Linux-5.15.0-92-generic-x86_64-with-glibc2.35', 'python': '3.10.12', 'heartbeatAt': '2024-05-22T18:49:28.717571', 'startedAt': '2024-05-22T18:49:28.186831', 'docker': None, 'cuda': None, 'args': ('--model', 'hf', '--model_args', 'pretrained=/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step14000', '--tasks', 'hellaswag,arc_easy,openbookqa,winogrande,sst2,mrpc', '--batch_size', 'auto', '--wandb_args', 'project=bharatgpt,group=trial_expt_2'), 'state': 'running', 'program': '-m lm_eval.__main__', 'codePathLocal': None, 'git': {'remote': 'https://github.com/EleutherAI/lm-evaluation-harness', 'commit': None}, 'email': None, 'root': '/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness', 'host': 'peacock-evaluation-worker-0', 'username': 'root', 'executable': '/usr/bin/python3', 'cpu_count': 80, 'cpu_count_logical': 160, 'cpu_freq': {'current': 2327.1428562499996, 'min': 800.0, 'max': 3400.0}, 'cpu_freq_per_core': [{'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, 
{'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, 
{'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}], 'disk': {'/': {'total': 877.6341285705566, 'used': 211.64437866210938}}, 'memory': {'total': 1007.4379997253418}}
+ 2024-05-22 18:49:28,731 INFO HandlerThread:1570 [system_monitor.py:probe():224] Finished collecting system info
+ 2024-05-22 18:49:28,731 INFO HandlerThread:1570 [system_monitor.py:probe():227] Publishing system info
+ 2024-05-22 18:49:28,733 INFO HandlerThread:1570 [system_monitor.py:probe():229] Finished publishing system info
+ 2024-05-22 18:49:28,739 DEBUG SenderThread:1570 [sender.py:send():378] send: files
+ 2024-05-22 18:49:28,739 INFO SenderThread:1570 [sender.py:_save_file():1389] saving file wandb-metadata.json with policy now
+ 2024-05-22 18:49:28,914 DEBUG HandlerThread:1570 [handler.py:handle_request():158] handle_request: python_packages
+ 2024-05-22 18:49:28,914 DEBUG SenderThread:1570 [sender.py:send_request():405] send_request: python_packages
+ 2024-05-22 18:49:28,916 DEBUG HandlerThread:1570 [handler.py:handle_request():158] handle_request: stop_status
+ 2024-05-22 18:49:28,917 DEBUG SenderThread:1570 [sender.py:send_request():405] send_request: stop_status
+ 2024-05-22 18:49:29,020 DEBUG SenderThread:1570 [sender.py:send():378] send: telemetry
+ 2024-05-22 18:49:29,397 INFO wandb-upload_0:1570 [upload_job.py:push():130] Uploaded file /tmp/tmpisanswpdwandb/k50sf3dt-wandb-metadata.json
+ 2024-05-22 18:49:29,484 INFO Thread-12 :1570 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184928-kt8p2r8k/files/wandb-metadata.json
+ 2024-05-22 18:49:29,484 INFO Thread-12 :1570 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184928-kt8p2r8k/files/output.log
+ 2024-05-22 18:49:29,484 INFO Thread-12 :1570 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184928-kt8p2r8k/files/requirements.txt
+ 2024-05-22 18:49:31,484 INFO Thread-12 :1570 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184928-kt8p2r8k/files/output.log
+ 2024-05-22 18:49:34,023 DEBUG HandlerThread:1570 [handler.py:handle_request():158] handle_request: status_report
+ 2024-05-22 18:49:39,336 DEBUG HandlerThread:1570 [handler.py:handle_request():158] handle_request: status_report
+ 2024-05-22 18:49:39,495 INFO Thread-12 :1570 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184928-kt8p2r8k/files/output.log
+ 2024-05-22 18:49:39,761 DEBUG SenderThread:1570 [sender.py:send():378] send: exit
+ 2024-05-22 18:49:39,761 INFO SenderThread:1570 [sender.py:send_exit():585] handling exit code: 1
+ 2024-05-22 18:49:39,761 INFO SenderThread:1570 [sender.py:send_exit():587] handling runtime: 11
+ 2024-05-22 18:49:39,762 INFO SenderThread:1570 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end
+ 2024-05-22 18:49:39,762 INFO SenderThread:1570 [sender.py:send_exit():593] send defer
+ 2024-05-22 18:49:39,763 DEBUG HandlerThread:1570 [handler.py:handle_request():158] handle_request: defer
+ 2024-05-22 18:49:39,763 INFO HandlerThread:1570 [handler.py:handle_request_defer():184] handle defer: 0
+ 2024-05-22 18:49:39,763 DEBUG SenderThread:1570 [sender.py:send_request():405] send_request: defer
+ 2024-05-22 18:49:39,763 INFO SenderThread:1570 [sender.py:send_request_defer():609] handle sender defer: 0
+ 2024-05-22 18:49:39,763 INFO SenderThread:1570 [sender.py:transition_state():613] send defer: 1
+ 2024-05-22 18:49:39,763 DEBUG HandlerThread:1570 [handler.py:handle_request():158] handle_request: defer
+ 2024-05-22 18:49:39,763 INFO HandlerThread:1570 [handler.py:handle_request_defer():184] handle defer: 1
+ 2024-05-22 18:49:39,763 DEBUG SenderThread:1570 [sender.py:send_request():405] send_request: defer
+ 2024-05-22 18:49:39,763 INFO SenderThread:1570 [sender.py:send_request_defer():609] handle sender defer: 1
+ 2024-05-22 18:49:39,763 INFO SenderThread:1570 [sender.py:transition_state():613] send defer: 2
+ 2024-05-22 18:49:39,763 DEBUG HandlerThread:1570 [handler.py:handle_request():158] handle_request: defer
+ 2024-05-22 18:49:39,763 INFO HandlerThread:1570 [handler.py:handle_request_defer():184] handle defer: 2
+ 2024-05-22 18:49:39,763 INFO HandlerThread:1570 [system_monitor.py:finish():203] Stopping system monitor
+ 2024-05-22 18:49:39,763 DEBUG SystemMonitor:1570 [system_monitor.py:_start():172] Starting system metrics aggregation loop
+ 2024-05-22 18:49:39,763 DEBUG SystemMonitor:1570 [system_monitor.py:_start():179] Finished system metrics aggregation loop
+ 2024-05-22 18:49:39,763 DEBUG SystemMonitor:1570 [system_monitor.py:_start():183] Publishing last batch of metrics
+ 2024-05-22 18:49:39,764 INFO HandlerThread:1570 [interfaces.py:finish():200] Joined cpu monitor
+ 2024-05-22 18:49:39,764 INFO HandlerThread:1570 [interfaces.py:finish():200] Joined disk monitor
+ 2024-05-22 18:49:39,764 INFO HandlerThread:1570 [interfaces.py:finish():200] Joined memory monitor
+ 2024-05-22 18:49:39,765 INFO HandlerThread:1570 [interfaces.py:finish():200] Joined network monitor
+ 2024-05-22 18:49:39,765 DEBUG SenderThread:1570 [sender.py:send_request():405] send_request: defer
+ 2024-05-22 18:49:39,765 INFO SenderThread:1570 [sender.py:send_request_defer():609] handle sender defer: 2
+ 2024-05-22 18:49:39,765 INFO SenderThread:1570 [sender.py:transition_state():613] send defer: 3
+ 2024-05-22 18:49:39,765 DEBUG HandlerThread:1570 [handler.py:handle_request():158] handle_request: defer
+ 2024-05-22 18:49:39,765 INFO HandlerThread:1570 [handler.py:handle_request_defer():184] handle defer: 3
+ 2024-05-22 18:49:39,765 DEBUG SenderThread:1570 [sender.py:send_request():405] send_request: defer
+ 2024-05-22 18:49:39,765 INFO SenderThread:1570 [sender.py:send_request_defer():609] handle sender defer: 3
+ 2024-05-22 18:49:39,765 INFO SenderThread:1570 [sender.py:transition_state():613] send defer: 4
+ 2024-05-22 18:49:39,765 DEBUG HandlerThread:1570 [handler.py:handle_request():158] handle_request: defer
+ 2024-05-22 18:49:39,765 INFO HandlerThread:1570 [handler.py:handle_request_defer():184] handle defer: 4
+ 2024-05-22 18:49:39,765 DEBUG SenderThread:1570 [sender.py:send_request():405] send_request: defer
+ 2024-05-22 18:49:39,765 INFO SenderThread:1570 [sender.py:send_request_defer():609] handle sender defer: 4
+ 2024-05-22 18:49:39,765 INFO SenderThread:1570 [sender.py:transition_state():613] send defer: 5
+ 2024-05-22 18:49:39,765 DEBUG HandlerThread:1570 [handler.py:handle_request():158] handle_request: defer
+ 2024-05-22 18:49:39,765 INFO HandlerThread:1570 [handler.py:handle_request_defer():184] handle defer: 5
+ 2024-05-22 18:49:39,766 DEBUG SenderThread:1570 [sender.py:send():378] send: summary
+ 2024-05-22 18:49:39,766 INFO SenderThread:1570 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end
+ 2024-05-22 18:49:39,767 DEBUG SenderThread:1570 [sender.py:send_request():405] send_request: defer
+ 2024-05-22 18:49:39,767 INFO SenderThread:1570 [sender.py:send_request_defer():609] handle sender defer: 5
+ 2024-05-22 18:49:39,767 INFO SenderThread:1570 [sender.py:transition_state():613] send defer: 6
+ 2024-05-22 18:49:39,767 DEBUG HandlerThread:1570 [handler.py:handle_request():158] handle_request: defer
+ 2024-05-22 18:49:39,767 INFO HandlerThread:1570 [handler.py:handle_request_defer():184] handle defer: 6
+ 2024-05-22 18:49:39,767 DEBUG SenderThread:1570 [sender.py:send_request():405] send_request: defer
+ 2024-05-22 18:49:39,767 INFO SenderThread:1570 [sender.py:send_request_defer():609] handle sender defer: 6
+ 2024-05-22 18:49:39,771 DEBUG HandlerThread:1570 [handler.py:handle_request():158] handle_request: status_report
+ 2024-05-22 18:49:39,865 INFO SenderThread:1570 [sender.py:transition_state():613] send defer: 7
+ 2024-05-22 18:49:39,865 DEBUG HandlerThread:1570 [handler.py:handle_request():158] handle_request: defer
+ 2024-05-22 18:49:39,865 INFO HandlerThread:1570 [handler.py:handle_request_defer():184] handle defer: 7
+ 2024-05-22 18:49:39,865 DEBUG SenderThread:1570 [sender.py:send_request():405] send_request: defer
+ 2024-05-22 18:49:39,865 INFO SenderThread:1570 [sender.py:send_request_defer():609] handle sender defer: 7
+ 2024-05-22 18:49:40,496 INFO Thread-12 :1570 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184928-kt8p2r8k/files/config.yaml
+ 2024-05-22 18:49:40,496 INFO Thread-12 :1570 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184928-kt8p2r8k/files/wandb-summary.json
+ 2024-05-22 18:49:40,761 DEBUG HandlerThread:1570 [handler.py:handle_request():158] handle_request: poll_exit
+ 2024-05-22 18:49:41,045 INFO SenderThread:1570 [sender.py:transition_state():613] send defer: 8
+ 2024-05-22 18:49:41,046 DEBUG SenderThread:1570 [sender.py:send_request():405] send_request: poll_exit
+ 2024-05-22 18:49:41,046 DEBUG HandlerThread:1570 [handler.py:handle_request():158] handle_request: defer
+ 2024-05-22 18:49:41,046 INFO HandlerThread:1570 [handler.py:handle_request_defer():184] handle defer: 8
+ 2024-05-22 18:49:41,046 DEBUG SenderThread:1570 [sender.py:send_request():405] send_request: defer
+ 2024-05-22 18:49:41,046 INFO SenderThread:1570 [sender.py:send_request_defer():609] handle sender defer: 8
+ 2024-05-22 18:49:41,046 INFO SenderThread:1570 [job_builder.py:build():432] Attempting to build job artifact
+ 2024-05-22 18:49:41,047 INFO SenderThread:1570 [job_builder.py:_get_source_type():576] no source found
+ 2024-05-22 18:49:41,047 INFO SenderThread:1570 [sender.py:transition_state():613] send defer: 9
+ 2024-05-22 18:49:41,047 DEBUG HandlerThread:1570 [handler.py:handle_request():158] handle_request: defer
+ 2024-05-22 18:49:41,047 INFO HandlerThread:1570 [handler.py:handle_request_defer():184] handle defer: 9
+ 2024-05-22 18:49:41,047 DEBUG SenderThread:1570 [sender.py:send_request():405] send_request: defer
+ 2024-05-22 18:49:41,047 INFO SenderThread:1570 [sender.py:send_request_defer():609] handle sender defer: 9
+ 2024-05-22 18:49:41,047 INFO SenderThread:1570 [dir_watcher.py:finish():358] shutting down directory watcher
+ 2024-05-22 18:49:41,498 INFO SenderThread:1570 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184928-kt8p2r8k/files/output.log
+ 2024-05-22 18:49:41,498 INFO SenderThread:1570 [dir_watcher.py:finish():388] scan: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184928-kt8p2r8k/files
+ 2024-05-22 18:49:41,498 INFO SenderThread:1570 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184928-kt8p2r8k/files/wandb-metadata.json wandb-metadata.json
+ 2024-05-22 18:49:41,498 INFO SenderThread:1570 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184928-kt8p2r8k/files/requirements.txt requirements.txt
+ 2024-05-22 18:49:41,498 INFO SenderThread:1570 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184928-kt8p2r8k/files/wandb-summary.json wandb-summary.json
+ 2024-05-22 18:49:41,501 INFO SenderThread:1570 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184928-kt8p2r8k/files/output.log output.log
+ 2024-05-22 18:49:41,503 INFO SenderThread:1570 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184928-kt8p2r8k/files/config.yaml config.yaml
+ 2024-05-22 18:49:41,503 INFO SenderThread:1570 [sender.py:transition_state():613] send defer: 10
+ 2024-05-22 18:49:41,503 DEBUG HandlerThread:1570 [handler.py:handle_request():158] handle_request: defer
+ 2024-05-22 18:49:41,503 INFO HandlerThread:1570 [handler.py:handle_request_defer():184] handle defer: 10
+ 2024-05-22 18:49:41,503 DEBUG SenderThread:1570 [sender.py:send_request():405] send_request: defer
+ 2024-05-22 18:49:41,503 INFO SenderThread:1570 [sender.py:send_request_defer():609] handle sender defer: 10
+ 2024-05-22 18:49:41,503 INFO SenderThread:1570 [file_pusher.py:finish():169] shutting down file pusher
+ 2024-05-22 18:49:41,752 INFO wandb-upload_0:1570 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184928-kt8p2r8k/files/requirements.txt
+ 2024-05-22 18:49:41,761 DEBUG HandlerThread:1570 [handler.py:handle_request():158] handle_request: poll_exit
+ 2024-05-22 18:49:41,761 DEBUG SenderThread:1570 [sender.py:send_request():405] send_request: poll_exit
+ 2024-05-22 18:49:42,137 INFO wandb-upload_2:1570 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184928-kt8p2r8k/files/output.log
lm-evaluation-harness/wandb/run-20240522_184928-kt8p2r8k/logs/debug.log ADDED
@@ -0,0 +1,28 @@
+ 2024-05-22 18:49:28,204 INFO MainThread:1415 [wandb_setup.py:_flush():76] Current SDK version is 0.17.0
+ 2024-05-22 18:49:28,204 INFO MainThread:1415 [wandb_setup.py:_flush():76] Configure stats pid to 1415
+ 2024-05-22 18:49:28,204 INFO MainThread:1415 [wandb_setup.py:_flush():76] Loading settings from /root/.config/wandb/settings
+ 2024-05-22 18:49:28,204 INFO MainThread:1415 [wandb_setup.py:_flush():76] Loading settings from /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/settings
+ 2024-05-22 18:49:28,204 INFO MainThread:1415 [wandb_setup.py:_flush():76] Loading settings from environment variables: {}
+ 2024-05-22 18:49:28,204 INFO MainThread:1415 [wandb_setup.py:_flush():76] Applying setup settings: {'_disable_service': False}
+ 2024-05-22 18:49:28,204 WARNING MainThread:1415 [wandb_setup.py:_flush():76] Could not find program at -m lm_eval.__main__
+ 2024-05-22 18:49:28,204 INFO MainThread:1415 [wandb_setup.py:_flush():76] Inferring run settings from compute environment: {'program_relpath': None, 'program': '-m lm_eval.__main__'}
+ 2024-05-22 18:49:28,204 INFO MainThread:1415 [wandb_setup.py:_flush():76] Applying login settings: {}
+ 2024-05-22 18:49:28,204 INFO MainThread:1415 [wandb_init.py:_log_setup():520] Logging user logs to /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184928-kt8p2r8k/logs/debug.log
+ 2024-05-22 18:49:28,204 INFO MainThread:1415 [wandb_init.py:_log_setup():521] Logging internal logs to /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240522_184928-kt8p2r8k/logs/debug-internal.log
+ 2024-05-22 18:49:28,204 INFO MainThread:1415 [wandb_init.py:init():560] calling init triggers
+ 2024-05-22 18:49:28,204 INFO MainThread:1415 [wandb_init.py:init():567] wandb.init called with sweep_config: {}
+ config: {}
+ 2024-05-22 18:49:28,204 INFO MainThread:1415 [wandb_init.py:init():610] starting backend
+ 2024-05-22 18:49:28,204 INFO MainThread:1415 [wandb_init.py:init():614] setting up manager
+ 2024-05-22 18:49:28,209 INFO MainThread:1415 [backend.py:_multiprocessing_setup():105] multiprocessing start_methods=fork,spawn,forkserver, using: spawn
+ 2024-05-22 18:49:28,209 INFO MainThread:1415 [wandb_init.py:init():622] backend started and connected
+ 2024-05-22 18:49:28,213 INFO MainThread:1415 [wandb_init.py:init():711] updated telemetry
+ 2024-05-22 18:49:28,223 INFO MainThread:1415 [wandb_init.py:init():744] communicating run to backend with 90.0 second timeout
+ 2024-05-22 18:49:28,483 INFO MainThread:1415 [wandb_run.py:_on_init():2396] communicating current version
+ 2024-05-22 18:49:28,634 INFO MainThread:1415 [wandb_run.py:_on_init():2405] got version response
+ 2024-05-22 18:49:28,634 INFO MainThread:1415 [wandb_init.py:init():795] starting run threads in backend
+ 2024-05-22 18:49:28,915 INFO MainThread:1415 [wandb_run.py:_console_start():2374] atexit reg
+ 2024-05-22 18:49:28,915 INFO MainThread:1415 [wandb_run.py:_redirect():2229] redirect: wrap_raw
+ 2024-05-22 18:49:28,915 INFO MainThread:1415 [wandb_run.py:_redirect():2294] Wrapping output streams.
+ 2024-05-22 18:49:28,915 INFO MainThread:1415 [wandb_run.py:_redirect():2319] Redirects installed.
+ 2024-05-22 18:49:28,918 INFO MainThread:1415 [wandb_init.py:init():838] run started, returning control to user process
lm-evaluation-harness/wandb/run-20240523_052210-qlxjd76q/files/config.yaml ADDED
@@ -0,0 +1,43 @@
+ wandb_version: 1
+
+ _wandb:
+   desc: null
+   value:
+     python_version: 3.10.12
+     cli_version: 0.17.0
+     framework: huggingface
+     huggingface_version: 4.41.1
+     is_jupyter_run: false
+     is_kaggle_kernel: false
+     start_time: 1716441730
+     t:
+       1:
+       - 1
+       - 5
+       - 11
+       - 49
+       - 51
+       - 53
+       - 55
+       - 71
+       - 98
+       - 100
+       2:
+       - 1
+       - 5
+       - 11
+       - 49
+       - 51
+       - 53
+       - 55
+       - 71
+       - 98
+       - 100
+       3:
+       - 23
+       4: 3.10.12
+       5: 0.17.0
+       6: 4.41.1
+       8:
+       - 5
+       13: linux-x86_64
lm-evaluation-harness/wandb/run-20240523_052210-qlxjd76q/files/output.log ADDED
@@ -0,0 +1,34 @@
+
+ 2024-05-23:05:22:11,517 INFO [__main__.py:251] Verbosity set to INFO
+ 2024-05-23:05:22:20,014 INFO [__main__.py:335] Selected Tasks: ['arc_easy', 'hellaswag', 'mrpc', 'openbookqa', 'sst2', 'winogrande']
+ 2024-05-23:05:22:20,015 INFO [evaluator.py:131] Setting random seed to 0 | Setting numpy seed to 1234 | Setting torch manual seed to 1234
+ 2024-05-23:05:22:20,016 INFO [evaluator.py:177] Initializing hf model, with arguments: {'pretrained': '/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step6000'}
+ 2024-05-23:05:22:22,340 INFO [huggingface.py:164] Using device 'cuda'
+ Traceback (most recent call last):
+   File "/usr/lib/python3.10/runpy.py", line 196, in _run_module_as_main
+     return _run_code(code, main_globals, None,
+   File "/usr/lib/python3.10/runpy.py", line 86, in _run_code
+     exec(code, run_globals)
+   File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 417, in <module>
+     cli_evaluate()
+   File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 341, in cli_evaluate
+     results = evaluator.simple_evaluate(
+   File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/utils.py", line 288, in _wrapper
+     return fn(*args, **kwargs)
+   File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/evaluator.py", line 180, in simple_evaluate
+     lm = lm_eval.api.registry.get_model(model).create_from_arg_string(
+   File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/api/model.py", line 134, in create_from_arg_string
+     return cls(**args, **args2)
+   File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 190, in __init__
+     self._get_config(
+   File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 471, in _get_config
+     self._config = transformers.AutoConfig.from_pretrained(
+   File "/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py", line 934, in from_pretrained
+     config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs)
+   File "/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 632, in get_config_dict
+     config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs)
+   File "/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 689, in _get_config_dict
+     resolved_config_file = cached_file(
+   File "/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py", line 370, in cached_file
+     raise EnvironmentError(
+ OSError: /mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step6000 does not appear to have a file named config.json. Checkout 'https://huggingface.co//mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step6000/tree/main' for available files.
lm-evaluation-harness/wandb/run-20240523_052210-qlxjd76q/files/requirements.txt ADDED
@@ -0,0 +1,155 @@
+ DataProperty==1.0.1
+ GitPython==3.1.43
+ Jinja2==3.1.4
+ Markdown==3.6
+ MarkupSafe==2.1.5
+ Pillow-SIMD==7.0.0.post3
+ PyYAML==6.0
+ Werkzeug==3.0.3
+ absl-py==2.1.0
+ accelerate==0.30.1
+ aiohttp==3.9.5
+ aiosignal==1.3.1
+ async-timeout==4.0.3
+ attrs==23.2.0
+ av==9.2.0
+ cachetools==5.3.3
+ certifi==2024.2.2
+ cffi==1.15.1
+ cfgv==3.4.0
+ chardet==5.2.0
+ charset-normalizer==3.3.2
+ click==8.1.7
+ cmake==3.29.2
+ colorama==0.4.6
+ datasets==2.19.1
+ deepspeed==0.12.4+hpu.synapse.v1.15.1
+ dill==0.3.8
+ distlib==0.3.8
+ docker-pycreds==0.4.0
+ einops==0.8.0
+ evaluate==0.4.2
+ exceptiongroup==1.2.1
+ expecttest==0.2.1
+ filelock==3.14.0
+ frozenlist==1.4.1
+ fsspec==2024.3.1
+ gitdb==4.0.11
+ google-auth-oauthlib==0.4.6
+ google-auth==2.29.0
+ grpcio==1.63.0
+ habana-media-loader==1.15.1.15
+ habana-pyhlml==1.15.1.15
+ habana-torch-dataloader==1.15.1.15
+ habana-torch-plugin==1.15.1.15
+ habana_gpu_migration==1.15.1.15
+ habana_quantization_toolkit==1.15.1.15
+ hjson==3.1.0
+ huggingface-hub==0.23.1
+ identify==2.5.36
+ idna==3.7
+ iniconfig==2.0.0
+ joblib==1.4.2
+ jsonlines==4.0.0
+ lightning-habana==1.4.0
+ lightning-utilities==0.11.2
+ lightning==2.2.0.post0
+ lm_eval==0.4.2
+ lm_eval==0.4.2
+ lm_eval==0.4.2
+ lxml==5.2.2
+ mbstrdecoder==1.1.3
+ more-itertools==10.2.0
+ mpi4py==3.1.4
+ mpmath==1.3.0
+ multidict==6.0.5
+ multiprocess==0.70.16
+ networkx==3.3
+ ninja==1.11.1.1
+ nltk==3.8.1
+ nodeenv==1.8.0
+ numexpr==2.10.0
+ numpy==1.23.5
+ oauthlib==3.2.2
+ packaging==24.0
+ pandas==2.0.1
+ pathspec==0.12.1
+ pathvalidate==3.2.0
+ peft==0.11.1
+ perfetto==0.7.0
+ pillow==10.3.0
+ pip==22.0.2
+ pip==23.3.1
+ platformdirs==4.2.1
+ pluggy==1.5.0
+ portalocker==2.8.2
+ pre-commit==3.3.3
+ pretty-errors==1.2.25
+ protobuf==3.20.3
+ psutil==5.9.8
+ py-cpuinfo==9.0.0
+ pyarrow-hotfix==0.6
+ pyarrow==16.1.0
+ pyasn1==0.6.0
+ pyasn1_modules==0.4.0
+ pybind11==2.10.4
+ pycparser==2.22
+ pydantic==1.10.13
+ pynvml==8.0.4
+ pytablewriter==1.2.0
+ pytest==8.2.0
+ python-dateutil==2.9.0.post0
+ pytorch-lightning==2.2.4
+ pytz==2024.1
+ regex==2023.5.5
+ requests-oauthlib==2.0.0
+ requests==2.31.0
+ rouge_score==0.1.2
+ rsa==4.9
+ sacrebleu==2.4.2
+ safetensors==0.4.3
+ scikit-learn==1.5.0
+ scipy==1.13.1
+ sentencepiece==0.2.0
+ sentry-sdk==2.2.1
+ setproctitle==1.3.3
+ setuptools==59.6.0
+ setuptools==69.5.1
+ six==1.16.0
+ smmap==5.0.1
+ sqlitedict==2.1.0
+ symengine==0.11.0
+ sympy==1.12
+ tabledata==1.3.3
+ tabulate==0.9.0
+ tcolorpy==0.1.6
+ tdqm==0.0.1
+ tensorboard-data-server==0.6.1
+ tensorboard-plugin-wit==1.8.1
+ tensorboard==2.11.2
+ threadpoolctl==3.5.0
+ tokenizers==0.19.1
+ tomli==2.0.1
+ torch==2.2.0a0+git8964477
+ torch_tb_profiler==0.4.0
+ torchaudio==2.2.0+08901ad
+ torchdata==0.7.1+5e6f7b7
+ torchmetrics==1.4.0
+ torchtext==0.17.0+400da5c
+ torchvision==0.17.0+b2383d4
+ tqdm-multiprocess==0.0.11
+ tqdm==4.66.4
+ transformers==4.41.1
+ typepy==1.3.2
+ typing_extensions==4.11.0
+ tzdata==2024.1
+ urllib3==1.26.18
+ virtualenv==20.26.1
+ wandb==0.17.0
+ wheel==0.37.1
+ wheel==0.43.0
+ word2number==1.1
+ xxhash==3.4.1
+ yamllint==1.35.1
+ yarl==1.9.4
+ zstandard==0.22.0
lm-evaluation-harness/wandb/run-20240523_052210-qlxjd76q/files/wandb-metadata.json ADDED
@@ -0,0 +1,850 @@
+ {
+     "os": "Linux-5.15.0-92-generic-x86_64-with-glibc2.35",
+     "python": "3.10.12",
+     "heartbeatAt": "2024-05-23T05:22:11.312115",
+     "startedAt": "2024-05-23T05:22:10.734091",
+     "docker": null,
+     "cuda": null,
+     "args": [
+         "--model",
+         "hf",
+         "--model_args",
+         "pretrained=/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step6000",
+         "--tasks",
+         "hellaswag,arc_easy,openbookqa,winogrande,sst2,mrpc",
+         "--batch_size",
+         "auto",
+         "--wandb_args",
+         "project=bharatgpt,group=trial_expt_2"
+     ],
+     "state": "running",
+     "program": "-m lm_eval.__main__",
+     "codePathLocal": null,
+     "git": {
+         "remote": "https://github.com/EleutherAI/lm-evaluation-harness",
+         "commit": null
+     },
+     "email": null,
+     "root": "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness",
+     "host": "peacock-evaluation-debug-worker-0",
+     "username": "root",
+     "executable": "/usr/bin/python3",
+     "cpu_count": 80,
+     "cpu_count_logical": 160,
+     "cpu_freq": {
+         "current": 2334.21898125,
+         "min": 800.0,
+         "max": 3400.0
+     },
+     "cpu_freq_per_core": [
+         {"current": 3399.997, "min": 800.0, "max": 3400.0},
+         {"current": 3400.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 3333.083, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 3400.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 3399.237, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0}
+     ],
+     "disk": {
+         "/": {
+             "total": 877.6341285705566,
844
+ "used": 212.24552154541016
845
+ }
846
+ },
847
+ "memory": {
848
+ "total": 1007.43798828125
849
+ }
850
+ }
lm-evaluation-harness/wandb/run-20240523_052210-qlxjd76q/files/wandb-summary.json ADDED
@@ -0,0 +1 @@
+ {"_wandb": {"runtime": 11}}
lm-evaluation-harness/wandb/run-20240523_052210-qlxjd76q/logs/debug-internal.log ADDED
@@ -0,0 +1,183 @@
+ 2024-05-23 05:22:10,753 INFO StreamThr :11124 [internal.py:wandb_internal():85] W&B internal server running at pid: 11124, started at: 2024-05-23 05:22:10.751648
+ 2024-05-23 05:22:10,754 DEBUG HandlerThread:11124 [handler.py:handle_request():158] handle_request: status
+ 2024-05-23 05:22:10,759 INFO WriterThread:11124 [datastore.py:open_for_write():87] open: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_052210-qlxjd76q/run-qlxjd76q.wandb
+ 2024-05-23 05:22:10,764 DEBUG SenderThread:11124 [sender.py:send():378] send: header
+ 2024-05-23 05:22:10,768 DEBUG SenderThread:11124 [sender.py:send():378] send: run
+ 2024-05-23 05:22:11,073 INFO SenderThread:11124 [dir_watcher.py:__init__():211] watching files in: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_052210-qlxjd76q/files
+ 2024-05-23 05:22:11,074 INFO SenderThread:11124 [sender.py:_start_run_threads():1123] run started: qlxjd76q with start time 1716441730.75226
+ 2024-05-23 05:22:11,077 DEBUG HandlerThread:11124 [handler.py:handle_request():158] handle_request: check_version
+ 2024-05-23 05:22:11,078 DEBUG SenderThread:11124 [sender.py:send_request():405] send_request: check_version
+ 2024-05-23 05:22:11,196 DEBUG HandlerThread:11124 [handler.py:handle_request():158] handle_request: run_start
+ 2024-05-23 05:22:11,199 DEBUG HandlerThread:11124 [system_info.py:__init__():26] System info init
+ 2024-05-23 05:22:11,199 DEBUG HandlerThread:11124 [system_info.py:__init__():41] System info init done
+ 2024-05-23 05:22:11,199 INFO HandlerThread:11124 [system_monitor.py:start():194] Starting system monitor
+ 2024-05-23 05:22:11,199 INFO SystemMonitor:11124 [system_monitor.py:_start():158] Starting system asset monitoring threads
+ 2024-05-23 05:22:11,199 INFO HandlerThread:11124 [system_monitor.py:probe():214] Collecting system info
+ 2024-05-23 05:22:11,206 INFO SystemMonitor:11124 [interfaces.py:start():188] Started cpu monitoring
+ 2024-05-23 05:22:11,206 INFO SystemMonitor:11124 [interfaces.py:start():188] Started disk monitoring
+ 2024-05-23 05:22:11,206 INFO SystemMonitor:11124 [interfaces.py:start():188] Started memory monitoring
+ 2024-05-23 05:22:11,214 INFO SystemMonitor:11124 [interfaces.py:start():188] Started network monitoring
+ 2024-05-23 05:22:11,312 DEBUG HandlerThread:11124 [system_info.py:probe():150] Probing system
+ 2024-05-23 05:22:11,316 DEBUG HandlerThread:11124 [system_info.py:_probe_git():135] Probing git
+ 2024-05-23 05:22:11,326 ERROR HandlerThread:11124 [gitlib.py:root():92] git root error: Cmd('git') failed due to: exit code(128)
+ cmdline: git rev-parse --show-toplevel
+ stderr: 'fatal: detected dubious ownership in repository at '/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness'
+ To add an exception for this directory, call:
+
+ git config --global --add safe.directory /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness'
+ 2024-05-23 05:22:11,326 DEBUG HandlerThread:11124 [system_info.py:_probe_git():143] Probing git done
+ 2024-05-23 05:22:11,326 DEBUG HandlerThread:11124 [system_info.py:probe():198] Probing system done
+ 2024-05-23 05:22:11,326 DEBUG HandlerThread:11124 [system_monitor.py:probe():223] {'os': 'Linux-5.15.0-92-generic-x86_64-with-glibc2.35', 'python': '3.10.12', 'heartbeatAt': '2024-05-23T05:22:11.312115', 'startedAt': '2024-05-23T05:22:10.734091', 'docker': None, 'cuda': None, 'args': ('--model', 'hf', '--model_args', 'pretrained=/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step6000', '--tasks', 'hellaswag,arc_easy,openbookqa,winogrande,sst2,mrpc', '--batch_size', 'auto', '--wandb_args', 'project=bharatgpt,group=trial_expt_2'), 'state': 'running', 'program': '-m lm_eval.__main__', 'codePathLocal': None, 'git': {'remote': 'https://github.com/EleutherAI/lm-evaluation-harness', 'commit': None}, 'email': None, 'root': '/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness', 'host': 'peacock-evaluation-debug-worker-0', 'username': 'root', 'executable': '/usr/bin/python3', 'cpu_count': 80, 'cpu_count_logical': 160, 'cpu_freq': {'current': 2334.21898125, 'min': 800.0, 'max': 3400.0}, 'cpu_freq_per_core': [{'current': 3399.997, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3333.083, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3399.237, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}], 'disk': {'/': {'total': 877.6341285705566, 'used': 212.24552154541016}}, 'memory': {'total': 1007.43798828125}}
+ 2024-05-23 05:22:11,326 INFO HandlerThread:11124 [system_monitor.py:probe():224] Finished collecting system info
+ 2024-05-23 05:22:11,326 INFO HandlerThread:11124 [system_monitor.py:probe():227] Publishing system info
+ 2024-05-23 05:22:11,329 INFO HandlerThread:11124 [system_monitor.py:probe():229] Finished publishing system info
+ 2024-05-23 05:22:11,334 DEBUG SenderThread:11124 [sender.py:send():378] send: files
+ 2024-05-23 05:22:11,334 INFO SenderThread:11124 [sender.py:_save_file():1389] saving file wandb-metadata.json with policy now
+ 2024-05-23 05:22:11,511 DEBUG HandlerThread:11124 [handler.py:handle_request():158] handle_request: python_packages
+ 2024-05-23 05:22:11,511 DEBUG SenderThread:11124 [sender.py:send_request():405] send_request: python_packages
+ 2024-05-23 05:22:11,512 DEBUG HandlerThread:11124 [handler.py:handle_request():158] handle_request: stop_status
+ 2024-05-23 05:22:11,512 DEBUG SenderThread:11124 [sender.py:send_request():405] send_request: stop_status
+ 2024-05-23 05:22:11,629 DEBUG SenderThread:11124 [sender.py:send():378] send: telemetry
+ 2024-05-23 05:22:11,888 INFO wandb-upload_0:11124 [upload_job.py:push():130] Uploaded file /tmp/tmpxfu0gkt0wandb/yw4erfpx-wandb-metadata.json
+ 2024-05-23 05:22:12,076 INFO Thread-12 :11124 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_052210-qlxjd76q/files/output.log
+ 2024-05-23 05:22:12,076 INFO Thread-12 :11124 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_052210-qlxjd76q/files/requirements.txt
+ 2024-05-23 05:22:12,076 INFO Thread-12 :11124 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_052210-qlxjd76q/files/wandb-metadata.json
+ 2024-05-23 05:22:14,076 INFO Thread-12 :11124 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_052210-qlxjd76q/files/output.log
+ 2024-05-23 05:22:16,633 DEBUG HandlerThread:11124 [handler.py:handle_request():158] handle_request: status_report
+ 2024-05-23 05:22:22,017 DEBUG HandlerThread:11124 [handler.py:handle_request():158] handle_request: status_report
+ 2024-05-23 05:22:22,083 INFO Thread-12 :11124 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_052210-qlxjd76q/files/output.log
+ 2024-05-23 05:22:22,347 DEBUG SenderThread:11124 [sender.py:send():378] send: exit
+ 2024-05-23 05:22:22,348 INFO SenderThread:11124 [sender.py:send_exit():585] handling exit code: 1
+ 2024-05-23 05:22:22,348 INFO SenderThread:11124 [sender.py:send_exit():587] handling runtime: 11
+ 2024-05-23 05:22:22,349 INFO SenderThread:11124 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end
+ 2024-05-23 05:22:22,349 INFO SenderThread:11124 [sender.py:send_exit():593] send defer
+ 2024-05-23 05:22:22,349 DEBUG HandlerThread:11124 [handler.py:handle_request():158] handle_request: defer
+ 2024-05-23 05:22:22,349 INFO HandlerThread:11124 [handler.py:handle_request_defer():184] handle defer: 0
+ 2024-05-23 05:22:22,350 DEBUG SenderThread:11124 [sender.py:send_request():405] send_request: defer
+ 2024-05-23 05:22:22,350 INFO SenderThread:11124 [sender.py:send_request_defer():609] handle sender defer: 0
+ 2024-05-23 05:22:22,350 INFO SenderThread:11124 [sender.py:transition_state():613] send defer: 1
+ 2024-05-23 05:22:22,350 DEBUG HandlerThread:11124 [handler.py:handle_request():158] handle_request: defer
+ 2024-05-23 05:22:22,350 INFO HandlerThread:11124 [handler.py:handle_request_defer():184] handle defer: 1
+ 2024-05-23 05:22:22,350 DEBUG SenderThread:11124 [sender.py:send_request():405] send_request: defer
+ 2024-05-23 05:22:22,350 INFO SenderThread:11124 [sender.py:send_request_defer():609] handle sender defer: 1
+ 2024-05-23 05:22:22,350 INFO SenderThread:11124 [sender.py:transition_state():613] send defer: 2
+ 2024-05-23 05:22:22,350 DEBUG HandlerThread:11124 [handler.py:handle_request():158] handle_request: defer
+ 2024-05-23 05:22:22,350 INFO HandlerThread:11124 [handler.py:handle_request_defer():184] handle defer: 2
+ 2024-05-23 05:22:22,350 INFO HandlerThread:11124 [system_monitor.py:finish():203] Stopping system monitor
+ 2024-05-23 05:22:22,350 DEBUG SystemMonitor:11124 [system_monitor.py:_start():172] Starting system metrics aggregation loop
+ 2024-05-23 05:22:22,351 DEBUG SystemMonitor:11124 [system_monitor.py:_start():179] Finished system metrics aggregation loop
+ 2024-05-23 05:22:22,351 INFO HandlerThread:11124 [interfaces.py:finish():200] Joined cpu monitor
+ 2024-05-23 05:22:22,351 DEBUG SystemMonitor:11124 [system_monitor.py:_start():183] Publishing last batch of metrics
+ 2024-05-23 05:22:22,351 INFO HandlerThread:11124 [interfaces.py:finish():200] Joined disk monitor
+ 2024-05-23 05:22:22,353 INFO HandlerThread:11124 [interfaces.py:finish():200] Joined memory monitor
+ 2024-05-23 05:22:22,353 INFO HandlerThread:11124 [interfaces.py:finish():200] Joined network monitor
+ 2024-05-23 05:22:22,353 DEBUG SenderThread:11124 [sender.py:send_request():405] send_request: defer
+ 2024-05-23 05:22:22,353 INFO SenderThread:11124 [sender.py:send_request_defer():609] handle sender defer: 2
+ 2024-05-23 05:22:22,353 INFO SenderThread:11124 [sender.py:transition_state():613] send defer: 3
+ 2024-05-23 05:22:22,353 DEBUG HandlerThread:11124 [handler.py:handle_request():158] handle_request: defer
+ 2024-05-23 05:22:22,353 INFO HandlerThread:11124 [handler.py:handle_request_defer():184] handle defer: 3
+ 2024-05-23 05:22:22,354 DEBUG SenderThread:11124 [sender.py:send():378] send: stats
+ 2024-05-23 05:22:22,355 DEBUG SenderThread:11124 [sender.py:send_request():405] send_request: defer
+ 2024-05-23 05:22:22,355 INFO SenderThread:11124 [sender.py:send_request_defer():609] handle sender defer: 3
+ 2024-05-23 05:22:22,355 INFO SenderThread:11124 [sender.py:transition_state():613] send defer: 4
+ 2024-05-23 05:22:22,355 DEBUG HandlerThread:11124 [handler.py:handle_request():158] handle_request: defer
+ 2024-05-23 05:22:22,355 INFO HandlerThread:11124 [handler.py:handle_request_defer():184] handle defer: 4
+ 2024-05-23 05:22:22,355 DEBUG SenderThread:11124 [sender.py:send_request():405] send_request: defer
+ 2024-05-23 05:22:22,355 INFO SenderThread:11124 [sender.py:send_request_defer():609] handle sender defer: 4
+ 2024-05-23 05:22:22,355 INFO SenderThread:11124 [sender.py:transition_state():613] send defer: 5
+ 2024-05-23 05:22:22,355 DEBUG HandlerThread:11124 [handler.py:handle_request():158] handle_request: defer
+ 2024-05-23 05:22:22,355 INFO HandlerThread:11124 [handler.py:handle_request_defer():184] handle defer: 5
+ 2024-05-23 05:22:22,355 DEBUG SenderThread:11124 [sender.py:send():378] send: summary
+ 2024-05-23 05:22:22,356 INFO SenderThread:11124 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end
+ 2024-05-23 05:22:22,356 DEBUG SenderThread:11124 [sender.py:send_request():405] send_request: defer
+ 2024-05-23 05:22:22,356 INFO SenderThread:11124 [sender.py:send_request_defer():609] handle sender defer: 5
+ 2024-05-23 05:22:22,356 INFO SenderThread:11124 [sender.py:transition_state():613] send defer: 6
+ 2024-05-23 05:22:22,356 DEBUG HandlerThread:11124 [handler.py:handle_request():158] handle_request: defer
+ 2024-05-23 05:22:22,356 INFO HandlerThread:11124 [handler.py:handle_request_defer():184] handle defer: 6
+ 2024-05-23 05:22:22,357 DEBUG SenderThread:11124 [sender.py:send_request():405] send_request: defer
+ 2024-05-23 05:22:22,357 INFO SenderThread:11124 [sender.py:send_request_defer():609] handle sender defer: 6
+ 2024-05-23 05:22:22,361 DEBUG HandlerThread:11124 [handler.py:handle_request():158] handle_request: status_report
+ 2024-05-23 05:22:22,448 INFO SenderThread:11124 [sender.py:transition_state():613] send defer: 7
+ 2024-05-23 05:22:22,448 DEBUG HandlerThread:11124 [handler.py:handle_request():158] handle_request: defer
+ 2024-05-23 05:22:22,448 INFO HandlerThread:11124 [handler.py:handle_request_defer():184] handle defer: 7
+ 2024-05-23 05:22:22,448 DEBUG SenderThread:11124 [sender.py:send_request():405] send_request: defer
+ 2024-05-23 05:22:22,448 INFO SenderThread:11124 [sender.py:send_request_defer():609] handle sender defer: 7
+ 2024-05-23 05:22:23,084 INFO Thread-12 :11124 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_052210-qlxjd76q/files/config.yaml
+ 2024-05-23 05:22:23,084 INFO Thread-12 :11124 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_052210-qlxjd76q/files/wandb-summary.json
+ 2024-05-23 05:22:23,347 DEBUG HandlerThread:11124 [handler.py:handle_request():158] handle_request: poll_exit
+ 2024-05-23 05:22:23,657 INFO SenderThread:11124 [sender.py:transition_state():613] send defer: 8
+ 2024-05-23 05:22:23,657 DEBUG SenderThread:11124 [sender.py:send_request():405] send_request: poll_exit
+ 2024-05-23 05:22:23,657 DEBUG HandlerThread:11124 [handler.py:handle_request():158] handle_request: defer
+ 2024-05-23 05:22:23,658 INFO HandlerThread:11124 [handler.py:handle_request_defer():184] handle defer: 8
+ 2024-05-23 05:22:23,658 DEBUG SenderThread:11124 [sender.py:send_request():405] send_request: defer
+ 2024-05-23 05:22:23,658 INFO SenderThread:11124 [sender.py:send_request_defer():609] handle sender defer: 8
+ 2024-05-23 05:22:23,658 INFO SenderThread:11124 [job_builder.py:build():432] Attempting to build job artifact
+ 2024-05-23 05:22:23,658 INFO SenderThread:11124 [job_builder.py:_get_source_type():576] no source found
+ 2024-05-23 05:22:23,658 INFO SenderThread:11124 [sender.py:transition_state():613] send defer: 9
+ 2024-05-23 05:22:23,659 DEBUG HandlerThread:11124 [handler.py:handle_request():158] handle_request: defer
+ 2024-05-23 05:22:23,659 INFO HandlerThread:11124 [handler.py:handle_request_defer():184] handle defer: 9
+ 2024-05-23 05:22:23,659 DEBUG SenderThread:11124 [sender.py:send_request():405] send_request: defer
+ 2024-05-23 05:22:23,659 INFO SenderThread:11124 [sender.py:send_request_defer():609] handle sender defer: 9
+ 2024-05-23 05:22:23,659 INFO SenderThread:11124 [dir_watcher.py:finish():358] shutting down directory watcher
+ 2024-05-23 05:22:24,085 INFO SenderThread:11124 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_052210-qlxjd76q/files/output.log
+ 2024-05-23 05:22:24,085 INFO SenderThread:11124 [dir_watcher.py:finish():388] scan: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_052210-qlxjd76q/files
+ 2024-05-23 05:22:24,086 INFO SenderThread:11124 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_052210-qlxjd76q/files/requirements.txt requirements.txt
+ 2024-05-23 05:22:24,086 INFO SenderThread:11124 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_052210-qlxjd76q/files/wandb-metadata.json wandb-metadata.json
+ 2024-05-23 05:22:24,086 INFO SenderThread:11124 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_052210-qlxjd76q/files/config.yaml config.yaml
+ 2024-05-23 05:22:24,088 INFO SenderThread:11124 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_052210-qlxjd76q/files/output.log output.log
+ 2024-05-23 05:22:24,090 INFO SenderThread:11124 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_052210-qlxjd76q/files/wandb-summary.json wandb-summary.json
+ 2024-05-23 05:22:24,090 INFO SenderThread:11124 [sender.py:transition_state():613] send defer: 10
+ 2024-05-23 05:22:24,091 DEBUG HandlerThread:11124 [handler.py:handle_request():158] handle_request: defer
+ 2024-05-23 05:22:24,091 INFO HandlerThread:11124 [handler.py:handle_request_defer():184] handle defer: 10
+ 2024-05-23 05:22:24,091 DEBUG SenderThread:11124 [sender.py:send_request():405] send_request: defer
+ 2024-05-23 05:22:24,091 INFO SenderThread:11124 [sender.py:send_request_defer():609] handle sender defer: 10
+ 2024-05-23 05:22:24,091 INFO SenderThread:11124 [file_pusher.py:finish():169] shutting down file pusher
+ 2024-05-23 05:22:24,320 INFO wandb-upload_0:11124 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_052210-qlxjd76q/files/requirements.txt
+ 2024-05-23 05:22:24,348 DEBUG HandlerThread:11124 [handler.py:handle_request():158] handle_request: poll_exit
+ 2024-05-23 05:22:24,348 DEBUG SenderThread:11124 [sender.py:send_request():405] send_request: poll_exit
+ 2024-05-23 05:22:24,668 INFO wandb-upload_3:11124 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_052210-qlxjd76q/files/wandb-summary.json
+ 2024-05-23 05:22:24,679 INFO wandb-upload_2:11124 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_052210-qlxjd76q/files/output.log
+ 2024-05-23 05:22:24,704 INFO wandb-upload_1:11124 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_052210-qlxjd76q/files/config.yaml
+ 2024-05-23 05:22:24,904 INFO Thread-11 (_thread_body):11124 [sender.py:transition_state():613] send defer: 11
+ 2024-05-23 05:22:24,904 DEBUG HandlerThread:11124 [handler.py:handle_request():158] handle_request: defer
+ 2024-05-23 05:22:24,904 INFO HandlerThread:11124 [handler.py:handle_request_defer():184] handle defer: 11
+ 2024-05-23 05:22:24,904 DEBUG SenderThread:11124 [sender.py:send_request():405] send_request: defer
+ 2024-05-23 05:22:24,904 INFO SenderThread:11124 [sender.py:send_request_defer():609] handle sender defer: 11
+ 2024-05-23 05:22:24,904 INFO SenderThread:11124 [file_pusher.py:join():175] waiting for file pusher
+ 2024-05-23 05:22:24,905 INFO SenderThread:11124 [sender.py:transition_state():613] send defer: 12
+ 2024-05-23 05:22:24,905 DEBUG HandlerThread:11124 [handler.py:handle_request():158] handle_request: defer
+ 2024-05-23 05:22:24,905 INFO HandlerThread:11124 [handler.py:handle_request_defer():184] handle defer: 12
+ 2024-05-23 05:22:24,905 DEBUG SenderThread:11124 [sender.py:send_request():405] send_request: defer
+ 2024-05-23 05:22:24,905 INFO SenderThread:11124 [sender.py:send_request_defer():609] handle sender defer: 12
+ 2024-05-23 05:22:24,905 INFO SenderThread:11124 [file_stream.py:finish():601] file stream finish called
+ 2024-05-23 05:22:24,985 INFO SenderThread:11124 [file_stream.py:finish():605] file stream finish is done
+ 2024-05-23 05:22:24,985 INFO SenderThread:11124 [sender.py:transition_state():613] send defer: 13
+ 2024-05-23 05:22:24,985 DEBUG HandlerThread:11124 [handler.py:handle_request():158] handle_request: defer
+ 2024-05-23 05:22:24,985 INFO HandlerThread:11124 [handler.py:handle_request_defer():184] handle defer: 13
+ 2024-05-23 05:22:24,985 DEBUG SenderThread:11124 [sender.py:send_request():405] send_request: defer
+ 2024-05-23 05:22:24,985 INFO SenderThread:11124 [sender.py:send_request_defer():609] handle sender defer: 13
+ 2024-05-23 05:22:24,985 INFO SenderThread:11124 [sender.py:transition_state():613] send defer: 14
+ 2024-05-23 05:22:24,985 DEBUG HandlerThread:11124 [handler.py:handle_request():158] handle_request: defer
+ 2024-05-23 05:22:24,985 INFO HandlerThread:11124 [handler.py:handle_request_defer():184] handle defer: 14
+ 2024-05-23 05:22:24,985 DEBUG SenderThread:11124 [sender.py:send():378] send: final
+ 2024-05-23 05:22:24,985 DEBUG SenderThread:11124 [sender.py:send():378] send: footer
+ 2024-05-23 05:22:24,985 DEBUG SenderThread:11124 [sender.py:send_request():405] send_request: defer
+ 2024-05-23 05:22:24,985 INFO SenderThread:11124 [sender.py:send_request_defer():609] handle sender defer: 14
+ 2024-05-23 05:22:24,986 DEBUG HandlerThread:11124 [handler.py:handle_request():158] handle_request: poll_exit
+ 2024-05-23 05:22:24,986 DEBUG HandlerThread:11124 [handler.py:handle_request():158] handle_request: poll_exit
+ 2024-05-23 05:22:24,986 DEBUG HandlerThread:11124 [handler.py:handle_request():158] handle_request: server_info
+ 2024-05-23 05:22:24,986 DEBUG HandlerThread:11124 [handler.py:handle_request():158] handle_request: get_summary
+ 2024-05-23 05:22:24,986 DEBUG HandlerThread:11124 [handler.py:handle_request():158] handle_request: sampled_history
+ 2024-05-23 05:22:24,986 DEBUG HandlerThread:11124 [handler.py:handle_request():158] handle_request: internal_messages
+ 2024-05-23 05:22:24,987 DEBUG SenderThread:11124 [sender.py:send_request():405] send_request: poll_exit
+ 2024-05-23 05:22:24,987 DEBUG SenderThread:11124 [sender.py:send_request():405] send_request: poll_exit
+ 2024-05-23 05:22:24,987 DEBUG SenderThread:11124 [sender.py:send_request():405] send_request: server_info
+ 2024-05-23 05:22:25,039 INFO MainThread:11124 [wandb_run.py:_footer_history_summary_info():3994] rendering history
+ 2024-05-23 05:22:25,039 INFO MainThread:11124 [wandb_run.py:_footer_history_summary_info():4026] rendering summary
+ 2024-05-23 05:22:25,039 INFO MainThread:11124 [wandb_run.py:_footer_sync_info():3953] logging synced files
+ 2024-05-23 05:22:25,039 DEBUG HandlerThread:11124 [handler.py:handle_request():158] handle_request: shutdown
+ 2024-05-23 05:22:25,039 INFO HandlerThread:11124 [handler.py:finish():882] shutting down handler
+ 2024-05-23 05:22:25,987 INFO WriterThread:11124 [datastore.py:close():296] close: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_052210-qlxjd76q/run-qlxjd76q.wandb
+ 2024-05-23 05:22:26,039 INFO SenderThread:11124 [sender.py:finish():1545] shutting down sender
+ 2024-05-23 05:22:26,039 INFO SenderThread:11124 [file_pusher.py:finish():169] shutting down file pusher
+ 2024-05-23 05:22:26,039 INFO SenderThread:11124 [file_pusher.py:join():175] waiting for file pusher
lm-evaluation-harness/wandb/run-20240523_052210-qlxjd76q/logs/debug.log ADDED
@@ -0,0 +1,29 @@
+ 2024-05-23 05:22:10,746 INFO MainThread:10969 [wandb_setup.py:_flush():76] Current SDK version is 0.17.0
+ 2024-05-23 05:22:10,746 INFO MainThread:10969 [wandb_setup.py:_flush():76] Configure stats pid to 10969
+ 2024-05-23 05:22:10,746 INFO MainThread:10969 [wandb_setup.py:_flush():76] Loading settings from /root/.config/wandb/settings
+ 2024-05-23 05:22:10,746 INFO MainThread:10969 [wandb_setup.py:_flush():76] Loading settings from /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/settings
+ 2024-05-23 05:22:10,746 INFO MainThread:10969 [wandb_setup.py:_flush():76] Loading settings from environment variables: {}
+ 2024-05-23 05:22:10,746 INFO MainThread:10969 [wandb_setup.py:_flush():76] Applying setup settings: {'_disable_service': False}
+ 2024-05-23 05:22:10,746 WARNING MainThread:10969 [wandb_setup.py:_flush():76] Could not find program at -m lm_eval.__main__
+ 2024-05-23 05:22:10,746 INFO MainThread:10969 [wandb_setup.py:_flush():76] Inferring run settings from compute environment: {'program_relpath': None, 'program': '-m lm_eval.__main__'}
+ 2024-05-23 05:22:10,746 INFO MainThread:10969 [wandb_setup.py:_flush():76] Applying login settings: {}
+ 2024-05-23 05:22:10,746 INFO MainThread:10969 [wandb_init.py:_log_setup():520] Logging user logs to /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_052210-qlxjd76q/logs/debug.log
+ 2024-05-23 05:22:10,746 INFO MainThread:10969 [wandb_init.py:_log_setup():521] Logging internal logs to /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_052210-qlxjd76q/logs/debug-internal.log
+ 2024-05-23 05:22:10,746 INFO MainThread:10969 [wandb_init.py:init():560] calling init triggers
+ 2024-05-23 05:22:10,746 INFO MainThread:10969 [wandb_init.py:init():567] wandb.init called with sweep_config: {}
+ config: {}
+ 2024-05-23 05:22:10,746 INFO MainThread:10969 [wandb_init.py:init():610] starting backend
+ 2024-05-23 05:22:10,746 INFO MainThread:10969 [wandb_init.py:init():614] setting up manager
+ 2024-05-23 05:22:10,750 INFO MainThread:10969 [backend.py:_multiprocessing_setup():105] multiprocessing start_methods=fork,spawn,forkserver, using: spawn
+ 2024-05-23 05:22:10,752 INFO MainThread:10969 [wandb_init.py:init():622] backend started and connected
+ 2024-05-23 05:22:10,759 INFO MainThread:10969 [wandb_init.py:init():711] updated telemetry
+ 2024-05-23 05:22:10,767 INFO MainThread:10969 [wandb_init.py:init():744] communicating run to backend with 90.0 second timeout
+ 2024-05-23 05:22:11,077 INFO MainThread:10969 [wandb_run.py:_on_init():2396] communicating current version
+ 2024-05-23 05:22:11,190 INFO MainThread:10969 [wandb_run.py:_on_init():2405] got version response
+ 2024-05-23 05:22:11,190 INFO MainThread:10969 [wandb_init.py:init():795] starting run threads in backend
+ 2024-05-23 05:22:11,512 INFO MainThread:10969 [wandb_run.py:_console_start():2374] atexit reg
+ 2024-05-23 05:22:11,512 INFO MainThread:10969 [wandb_run.py:_redirect():2229] redirect: wrap_raw
+ 2024-05-23 05:22:11,512 INFO MainThread:10969 [wandb_run.py:_redirect():2294] Wrapping output streams.
+ 2024-05-23 05:22:11,512 INFO MainThread:10969 [wandb_run.py:_redirect():2319] Redirects installed.
+ 2024-05-23 05:22:11,515 INFO MainThread:10969 [wandb_init.py:init():838] run started, returning control to user process
+ 2024-05-23 05:22:26,040 WARNING MsgRouterThr:10969 [router.py:message_loop():77] message_loop has been closed
lm-evaluation-harness/wandb/run-20240523_052210-qlxjd76q/run-qlxjd76q.wandb ADDED
Binary file (11.2 kB)
lm-evaluation-harness/wandb/run-20240523_080141-2cqsvyos/files/config.yaml ADDED
@@ -0,0 +1,43 @@
+ wandb_version: 1
+
+ _wandb:
+   desc: null
+   value:
+     python_version: 3.10.12
+     cli_version: 0.17.0
+     framework: huggingface
+     huggingface_version: 4.41.1
+     is_jupyter_run: false
+     is_kaggle_kernel: false
+     start_time: 1716451301
+     t:
+       1:
+       - 1
+       - 5
+       - 11
+       - 49
+       - 51
+       - 53
+       - 55
+       - 71
+       - 98
+       - 100
+       2:
+       - 1
+       - 5
+       - 11
+       - 49
+       - 51
+       - 53
+       - 55
+       - 71
+       - 98
+       - 100
+       3:
+       - 23
+       4: 3.10.12
+       5: 0.17.0
+       6: 4.41.1
+       8:
+       - 5
+       13: linux-x86_64
lm-evaluation-harness/wandb/run-20240523_080141-2cqsvyos/files/output.log ADDED
@@ -0,0 +1,34 @@
+
+ 2024-05-23:08:01:42,093 INFO [__main__.py:251] Verbosity set to INFO
+ 2024-05-23:08:01:50,433 INFO [__main__.py:335] Selected Tasks: ['arc_easy', 'hellaswag', 'mrpc', 'openbookqa', 'sst2', 'winogrande']
+ 2024-05-23:08:01:50,434 INFO [evaluator.py:131] Setting random seed to 0 | Setting numpy seed to 1234 | Setting torch manual seed to 1234
+ 2024-05-23:08:01:50,434 INFO [evaluator.py:177] Initializing hf model, with arguments: {'pretrained': '/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step24000'}
+ 2024-05-23:08:01:52,729 INFO [huggingface.py:164] Using device 'cuda'
+ Traceback (most recent call last):
+   File "/usr/lib/python3.10/runpy.py", line 196, in _run_module_as_main
+     return _run_code(code, main_globals, None,
+   File "/usr/lib/python3.10/runpy.py", line 86, in _run_code
+     exec(code, run_globals)
+   File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 417, in <module>
+     cli_evaluate()
+   File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 341, in cli_evaluate
+     results = evaluator.simple_evaluate(
+   File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/utils.py", line 288, in _wrapper
+     return fn(*args, **kwargs)
+   File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/evaluator.py", line 180, in simple_evaluate
+     lm = lm_eval.api.registry.get_model(model).create_from_arg_string(
+   File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/api/model.py", line 134, in create_from_arg_string
+     return cls(**args, **args2)
+   File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 190, in __init__
+     self._get_config(
+   File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 471, in _get_config
+     self._config = transformers.AutoConfig.from_pretrained(
+   File "/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py", line 934, in from_pretrained
+     config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs)
+   File "/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 632, in get_config_dict
+     config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs)
+   File "/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 689, in _get_config_dict
+     resolved_config_file = cached_file(
+   File "/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py", line 370, in cached_file
+     raise EnvironmentError(
+ OSError: /mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step24000 does not appear to have a file named config.json. Checkout 'https://huggingface.co//mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step24000/tree/main' for available files.
lm-evaluation-harness/wandb/run-20240523_080141-2cqsvyos/files/requirements.txt ADDED
@@ -0,0 +1,155 @@
+ DataProperty==1.0.1
+ GitPython==3.1.43
+ Jinja2==3.1.4
+ Markdown==3.6
+ MarkupSafe==2.1.5
+ Pillow-SIMD==7.0.0.post3
+ PyYAML==6.0
+ Werkzeug==3.0.3
+ absl-py==2.1.0
+ accelerate==0.30.1
+ aiohttp==3.9.5
+ aiosignal==1.3.1
+ async-timeout==4.0.3
+ attrs==23.2.0
+ av==9.2.0
+ cachetools==5.3.3
+ certifi==2024.2.2
+ cffi==1.15.1
+ cfgv==3.4.0
+ chardet==5.2.0
+ charset-normalizer==3.3.2
+ click==8.1.7
+ cmake==3.29.2
+ colorama==0.4.6
+ datasets==2.19.1
+ deepspeed==0.12.4+hpu.synapse.v1.15.1
+ dill==0.3.8
+ distlib==0.3.8
+ docker-pycreds==0.4.0
+ einops==0.8.0
+ evaluate==0.4.2
+ exceptiongroup==1.2.1
+ expecttest==0.2.1
+ filelock==3.14.0
+ frozenlist==1.4.1
+ fsspec==2024.3.1
+ gitdb==4.0.11
+ google-auth-oauthlib==0.4.6
+ google-auth==2.29.0
+ grpcio==1.63.0
+ habana-media-loader==1.15.1.15
+ habana-pyhlml==1.15.1.15
+ habana-torch-dataloader==1.15.1.15
+ habana-torch-plugin==1.15.1.15
+ habana_gpu_migration==1.15.1.15
+ habana_quantization_toolkit==1.15.1.15
+ hjson==3.1.0
+ huggingface-hub==0.23.1
+ identify==2.5.36
+ idna==3.7
+ iniconfig==2.0.0
+ joblib==1.4.2
+ jsonlines==4.0.0
+ lightning-habana==1.4.0
+ lightning-utilities==0.11.2
+ lightning==2.2.0.post0
+ lm_eval==0.4.2
+ lm_eval==0.4.2
+ lm_eval==0.4.2
+ lxml==5.2.2
+ mbstrdecoder==1.1.3
+ more-itertools==10.2.0
+ mpi4py==3.1.4
+ mpmath==1.3.0
+ multidict==6.0.5
+ multiprocess==0.70.16
+ networkx==3.3
+ ninja==1.11.1.1
+ nltk==3.8.1
+ nodeenv==1.8.0
+ numexpr==2.10.0
+ numpy==1.23.5
+ oauthlib==3.2.2
+ packaging==24.0
+ pandas==2.0.1
+ pathspec==0.12.1
+ pathvalidate==3.2.0
+ peft==0.11.1
+ perfetto==0.7.0
+ pillow==10.3.0
+ pip==22.0.2
+ pip==23.3.1
+ platformdirs==4.2.1
+ pluggy==1.5.0
+ portalocker==2.8.2
+ pre-commit==3.3.3
+ pretty-errors==1.2.25
+ protobuf==3.20.3
+ psutil==5.9.8
+ py-cpuinfo==9.0.0
+ pyarrow-hotfix==0.6
+ pyarrow==16.1.0
+ pyasn1==0.6.0
+ pyasn1_modules==0.4.0
+ pybind11==2.10.4
+ pycparser==2.22
+ pydantic==1.10.13
+ pynvml==8.0.4
+ pytablewriter==1.2.0
+ pytest==8.2.0
+ python-dateutil==2.9.0.post0
+ pytorch-lightning==2.2.4
+ pytz==2024.1
+ regex==2023.5.5
+ requests-oauthlib==2.0.0
+ requests==2.31.0
+ rouge_score==0.1.2
+ rsa==4.9
+ sacrebleu==2.4.2
+ safetensors==0.4.3
+ scikit-learn==1.5.0
+ scipy==1.13.1
+ sentencepiece==0.2.0
+ sentry-sdk==2.2.1
+ setproctitle==1.3.3
+ setuptools==59.6.0
+ setuptools==69.5.1
+ six==1.16.0
+ smmap==5.0.1
+ sqlitedict==2.1.0
+ symengine==0.11.0
+ sympy==1.12
+ tabledata==1.3.3
+ tabulate==0.9.0
+ tcolorpy==0.1.6
+ tdqm==0.0.1
+ tensorboard-data-server==0.6.1
+ tensorboard-plugin-wit==1.8.1
+ tensorboard==2.11.2
+ threadpoolctl==3.5.0
+ tokenizers==0.19.1
+ tomli==2.0.1
+ torch==2.2.0a0+git8964477
+ torch_tb_profiler==0.4.0
+ torchaudio==2.2.0+08901ad
+ torchdata==0.7.1+5e6f7b7
+ torchmetrics==1.4.0
+ torchtext==0.17.0+400da5c
+ torchvision==0.17.0+b2383d4
+ tqdm-multiprocess==0.0.11
+ tqdm==4.66.4
+ transformers==4.41.1
+ typepy==1.3.2
+ typing_extensions==4.11.0
+ tzdata==2024.1
+ urllib3==1.26.18
+ virtualenv==20.26.1
+ wandb==0.17.0
+ wheel==0.37.1
+ wheel==0.43.0
+ word2number==1.1
+ xxhash==3.4.1
+ yamllint==1.35.1
+ yarl==1.9.4
+ zstandard==0.22.0
lm-evaluation-harness/wandb/run-20240523_080141-2cqsvyos/files/wandb-metadata.json ADDED
@@ -0,0 +1,850 @@
+ {
+ "os": "Linux-5.15.0-92-generic-x86_64-with-glibc2.35",
+ "python": "3.10.12",
+ "heartbeatAt": "2024-05-23T08:01:41.881563",
+ "startedAt": "2024-05-23T08:01:41.399462",
+ "docker": null,
+ "cuda": null,
+ "args": ["--model", "hf", "--model_args", "pretrained=/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step24000", "--tasks", "hellaswag,arc_easy,openbookqa,winogrande,sst2,mrpc", "--batch_size", "auto", "--wandb_args", "project=bharatgpt,group=trial_expt_2"],
+ "state": "running",
+ "program": "-m lm_eval.__main__",
+ "codePathLocal": null,
+ "git": {"remote": "https://github.com/EleutherAI/lm-evaluation-harness", "commit": null},
+ "email": null,
+ "root": "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness",
+ "host": "peacock-evaluation-worker-0",
+ "username": "root",
+ "executable": "/usr/bin/python3",
+ "cpu_count": 80,
+ "cpu_count_logical": 160,
+ "cpu_freq": {"current": 2325.7879625, "min": 800.0, "max": 3400.0},
+ "cpu_freq_per_core": [
+ {"current": 3400.002, "min": 800.0, "max": 3400.0},
+ {"current": 3400.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 3400.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 3399.997, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0,
662
+ "min": 800.0,
663
+ "max": 3400.0
664
+ },
665
+ {
666
+ "current": 2300.0,
667
+ "min": 800.0,
668
+ "max": 3400.0
669
+ },
670
+ {
671
+ "current": 2300.0,
672
+ "min": 800.0,
673
+ "max": 3400.0
674
+ },
675
+ {
676
+ "current": 2300.0,
677
+ "min": 800.0,
678
+ "max": 3400.0
679
+ },
680
+ {
681
+ "current": 2300.0,
682
+ "min": 800.0,
683
+ "max": 3400.0
684
+ },
685
+ {
686
+ "current": 2300.0,
687
+ "min": 800.0,
688
+ "max": 3400.0
689
+ },
690
+ {
691
+ "current": 2300.0,
692
+ "min": 800.0,
693
+ "max": 3400.0
694
+ },
695
+ {
696
+ "current": 2300.0,
697
+ "min": 800.0,
698
+ "max": 3400.0
699
+ },
700
+ {
701
+ "current": 2300.0,
702
+ "min": 800.0,
703
+ "max": 3400.0
704
+ },
705
+ {
706
+ "current": 2300.0,
707
+ "min": 800.0,
708
+ "max": 3400.0
709
+ },
710
+ {
711
+ "current": 2300.0,
712
+ "min": 800.0,
713
+ "max": 3400.0
714
+ },
715
+ {
716
+ "current": 2300.0,
717
+ "min": 800.0,
718
+ "max": 3400.0
719
+ },
720
+ {
721
+ "current": 2300.0,
722
+ "min": 800.0,
723
+ "max": 3400.0
724
+ },
725
+ {
726
+ "current": 2300.0,
727
+ "min": 800.0,
728
+ "max": 3400.0
729
+ },
730
+ {
731
+ "current": 2300.0,
732
+ "min": 800.0,
733
+ "max": 3400.0
734
+ },
735
+ {
736
+ "current": 2300.0,
737
+ "min": 800.0,
738
+ "max": 3400.0
739
+ },
740
+ {
741
+ "current": 2300.0,
742
+ "min": 800.0,
743
+ "max": 3400.0
744
+ },
745
+ {
746
+ "current": 2300.0,
747
+ "min": 800.0,
748
+ "max": 3400.0
749
+ },
750
+ {
751
+ "current": 2300.0,
752
+ "min": 800.0,
753
+ "max": 3400.0
754
+ },
755
+ {
756
+ "current": 2300.0,
757
+ "min": 800.0,
758
+ "max": 3400.0
759
+ },
760
+ {
761
+ "current": 2300.0,
762
+ "min": 800.0,
763
+ "max": 3400.0
764
+ },
765
+ {
766
+ "current": 2300.0,
767
+ "min": 800.0,
768
+ "max": 3400.0
769
+ },
770
+ {
771
+ "current": 2300.0,
772
+ "min": 800.0,
773
+ "max": 3400.0
774
+ },
775
+ {
776
+ "current": 2300.0,
777
+ "min": 800.0,
778
+ "max": 3400.0
779
+ },
780
+ {
781
+ "current": 2300.0,
782
+ "min": 800.0,
783
+ "max": 3400.0
784
+ },
785
+ {
786
+ "current": 2300.0,
787
+ "min": 800.0,
788
+ "max": 3400.0
789
+ },
790
+ {
791
+ "current": 2300.0,
792
+ "min": 800.0,
793
+ "max": 3400.0
794
+ },
795
+ {
796
+ "current": 2300.0,
797
+ "min": 800.0,
798
+ "max": 3400.0
799
+ },
800
+ {
801
+ "current": 2300.0,
802
+ "min": 800.0,
803
+ "max": 3400.0
804
+ },
805
+ {
806
+ "current": 2300.0,
807
+ "min": 800.0,
808
+ "max": 3400.0
809
+ },
810
+ {
811
+ "current": 2300.0,
812
+ "min": 800.0,
813
+ "max": 3400.0
814
+ },
815
+ {
816
+ "current": 2300.0,
817
+ "min": 800.0,
818
+ "max": 3400.0
819
+ },
820
+ {
821
+ "current": 2300.0,
822
+ "min": 800.0,
823
+ "max": 3400.0
824
+ },
825
+ {
826
+ "current": 2300.0,
827
+ "min": 800.0,
828
+ "max": 3400.0
829
+ },
830
+ {
831
+ "current": 2300.0,
832
+ "min": 800.0,
833
+ "max": 3400.0
834
+ },
835
+ {
836
+ "current": 2300.0,
837
+ "min": 800.0,
838
+ "max": 3400.0
839
+ }
840
+ ],
841
+ "disk": {
842
+ "/": {
843
+ "total": 877.6341285705566,
844
+ "used": 211.64005279541016
845
+ }
846
+ },
847
+ "memory": {
848
+ "total": 1007.4379806518555
849
+ }
850
+ }
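The tail of wandb-metadata.json above records one {current, min, max} entry per logical CPU. A minimal sketch of how such per-core frequency data can be collected in the same shape, assuming psutil is installed (wandb's system monitor is generally understood to build on psutil, an assumption here rather than something this listing confirms):

    import json
    import psutil

    # psutil.cpu_freq(percpu=True) yields one scpufreq(current, min, max)
    # record per logical core, i.e. the numbers serialized above.
    per_core = [
        {"current": f.current, "min": f.min, "max": f.max}
        for f in psutil.cpu_freq(percpu=True)
    ]
    print(json.dumps({"cpu_freq_per_core": per_core}, indent=4))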
lm-evaluation-harness/wandb/run-20240523_080141-2cqsvyos/files/wandb-summary.json ADDED
@@ -0,0 +1 @@
+ {"_wandb": {"runtime": 10}}
lm-evaluation-harness/wandb/run-20240523_080141-2cqsvyos/logs/debug-internal.log ADDED
@@ -0,0 +1,185 @@
+ 2024-05-23 08:01:41,420 INFO StreamThr :3068 [internal.py:wandb_internal():85] W&B internal server running at pid: 3068, started at: 2024-05-23 08:01:41.418713
+ 2024-05-23 08:01:41,425 DEBUG HandlerThread:3068 [handler.py:handle_request():158] handle_request: status
+ 2024-05-23 08:01:41,425 INFO WriterThread:3068 [datastore.py:open_for_write():87] open: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_080141-2cqsvyos/run-2cqsvyos.wandb
+ 2024-05-23 08:01:41,428 DEBUG SenderThread:3068 [sender.py:send():378] send: header
+ 2024-05-23 08:01:41,431 DEBUG SenderThread:3068 [sender.py:send():378] send: run
+ 2024-05-23 08:01:41,684 INFO SenderThread:3068 [dir_watcher.py:__init__():211] watching files in: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_080141-2cqsvyos/files
+ 2024-05-23 08:01:41,684 INFO SenderThread:3068 [sender.py:_start_run_threads():1123] run started: 2cqsvyos with start time 1716451301.418951
+ 2024-05-23 08:01:41,688 DEBUG HandlerThread:3068 [handler.py:handle_request():158] handle_request: check_version
+ 2024-05-23 08:01:41,688 DEBUG SenderThread:3068 [sender.py:send_request():405] send_request: check_version
+ 2024-05-23 08:01:41,806 DEBUG HandlerThread:3068 [handler.py:handle_request():158] handle_request: run_start
+ 2024-05-23 08:01:41,808 DEBUG HandlerThread:3068 [system_info.py:__init__():26] System info init
+ 2024-05-23 08:01:41,808 DEBUG HandlerThread:3068 [system_info.py:__init__():41] System info init done
+ 2024-05-23 08:01:41,808 INFO HandlerThread:3068 [system_monitor.py:start():194] Starting system monitor
+ 2024-05-23 08:01:41,808 INFO SystemMonitor:3068 [system_monitor.py:_start():158] Starting system asset monitoring threads
+ 2024-05-23 08:01:41,808 INFO HandlerThread:3068 [system_monitor.py:probe():214] Collecting system info
+ 2024-05-23 08:01:41,815 INFO SystemMonitor:3068 [interfaces.py:start():188] Started cpu monitoring
+ 2024-05-23 08:01:41,816 INFO SystemMonitor:3068 [interfaces.py:start():188] Started disk monitoring
+ 2024-05-23 08:01:41,817 INFO SystemMonitor:3068 [interfaces.py:start():188] Started memory monitoring
+ 2024-05-23 08:01:41,818 INFO SystemMonitor:3068 [interfaces.py:start():188] Started network monitoring
+ 2024-05-23 08:01:41,881 DEBUG HandlerThread:3068 [system_info.py:probe():150] Probing system
+ 2024-05-23 08:01:41,884 DEBUG HandlerThread:3068 [system_info.py:_probe_git():135] Probing git
+ 2024-05-23 08:01:41,894 ERROR HandlerThread:3068 [gitlib.py:root():92] git root error: Cmd('git') failed due to: exit code(128)
+ cmdline: git rev-parse --show-toplevel
+ stderr: 'fatal: detected dubious ownership in repository at '/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness'
+ To add an exception for this directory, call:
+
+ git config --global --add safe.directory /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness'
+ 2024-05-23 08:01:41,894 DEBUG HandlerThread:3068 [system_info.py:_probe_git():143] Probing git done
+ 2024-05-23 08:01:41,894 DEBUG HandlerThread:3068 [system_info.py:probe():198] Probing system done
+ 2024-05-23 08:01:41,894 DEBUG HandlerThread:3068 [system_monitor.py:probe():223] {'os': 'Linux-5.15.0-92-generic-x86_64-with-glibc2.35', 'python': '3.10.12', 'heartbeatAt': '2024-05-23T08:01:41.881563', 'startedAt': '2024-05-23T08:01:41.399462', 'docker': None, 'cuda': None, 'args': ('--model', 'hf', '--model_args', 'pretrained=/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step24000', '--tasks', 'hellaswag,arc_easy,openbookqa,winogrande,sst2,mrpc', '--batch_size', 'auto', '--wandb_args', 'project=bharatgpt,group=trial_expt_2'), 'state': 'running', 'program': '-m lm_eval.__main__', 'codePathLocal': None, 'git': {'remote': 'https://github.com/EleutherAI/lm-evaluation-harness', 'commit': None}, 'email': None, 'root': '/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness', 'host': 'peacock-evaluation-worker-0', 'username': 'root', 'executable': '/usr/bin/python3', 'cpu_count': 80, 'cpu_count_logical': 160, 'cpu_freq': {'current': 2325.7879625, 'min': 800.0, 'max': 3400.0}, 'cpu_freq_per_core': [{'current': 3400.002, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 
2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3399.997, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 
2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}], 'disk': {'/': {'total': 877.6341285705566, 'used': 211.64005279541016}}, 'memory': {'total': 1007.4379806518555}}
+ 2024-05-23 08:01:41,894 INFO HandlerThread:3068 [system_monitor.py:probe():224] Finished collecting system info
+ 2024-05-23 08:01:41,895 INFO HandlerThread:3068 [system_monitor.py:probe():227] Publishing system info
+ 2024-05-23 08:01:41,897 INFO HandlerThread:3068 [system_monitor.py:probe():229] Finished publishing system info
+ 2024-05-23 08:01:41,902 DEBUG SenderThread:3068 [sender.py:send():378] send: files
+ 2024-05-23 08:01:41,902 INFO SenderThread:3068 [sender.py:_save_file():1389] saving file wandb-metadata.json with policy now
+ 2024-05-23 08:01:42,079 DEBUG HandlerThread:3068 [handler.py:handle_request():158] handle_request: python_packages
+ 2024-05-23 08:01:42,079 DEBUG SenderThread:3068 [sender.py:send_request():405] send_request: python_packages
+ 2024-05-23 08:01:42,080 DEBUG HandlerThread:3068 [handler.py:handle_request():158] handle_request: stop_status
+ 2024-05-23 08:01:42,089 DEBUG SenderThread:3068 [sender.py:send_request():405] send_request: stop_status
+ 2024-05-23 08:01:42,255 DEBUG SenderThread:3068 [sender.py:send():378] send: telemetry
+ 2024-05-23 08:01:42,485 INFO wandb-upload_0:3068 [upload_job.py:push():130] Uploaded file /tmp/tmpzhezkosawandb/o4yjpi0c-wandb-metadata.json
+ 2024-05-23 08:01:42,687 INFO Thread-12 :3068 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_080141-2cqsvyos/files/wandb-metadata.json
+ 2024-05-23 08:01:42,687 INFO Thread-12 :3068 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_080141-2cqsvyos/files/requirements.txt
+ 2024-05-23 08:01:42,687 INFO Thread-12 :3068 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_080141-2cqsvyos/files/output.log
+ 2024-05-23 08:01:44,686 INFO Thread-12 :3068 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_080141-2cqsvyos/files/output.log
+ 2024-05-23 08:01:47,257 DEBUG HandlerThread:3068 [handler.py:handle_request():158] handle_request: status_report
+ 2024-05-23 08:01:52,435 DEBUG HandlerThread:3068 [handler.py:handle_request():158] handle_request: status_report
+ 2024-05-23 08:01:52,693 INFO Thread-12 :3068 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_080141-2cqsvyos/files/output.log
+ 2024-05-23 08:01:52,736 DEBUG SenderThread:3068 [sender.py:send():378] send: exit
+ 2024-05-23 08:01:52,736 INFO SenderThread:3068 [sender.py:send_exit():585] handling exit code: 1
+ 2024-05-23 08:01:52,736 INFO SenderThread:3068 [sender.py:send_exit():587] handling runtime: 10
+ 2024-05-23 08:01:52,738 INFO SenderThread:3068 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end
+ 2024-05-23 08:01:52,738 INFO SenderThread:3068 [sender.py:send_exit():593] send defer
+ 2024-05-23 08:01:52,738 DEBUG HandlerThread:3068 [handler.py:handle_request():158] handle_request: defer
+ 2024-05-23 08:01:52,738 INFO HandlerThread:3068 [handler.py:handle_request_defer():184] handle defer: 0
+ 2024-05-23 08:01:52,738 DEBUG SenderThread:3068 [sender.py:send_request():405] send_request: defer
+ 2024-05-23 08:01:52,738 INFO SenderThread:3068 [sender.py:send_request_defer():609] handle sender defer: 0
+ 2024-05-23 08:01:52,738 INFO SenderThread:3068 [sender.py:transition_state():613] send defer: 1
+ 2024-05-23 08:01:52,738 DEBUG HandlerThread:3068 [handler.py:handle_request():158] handle_request: defer
+ 2024-05-23 08:01:52,738 INFO HandlerThread:3068 [handler.py:handle_request_defer():184] handle defer: 1
+ 2024-05-23 08:01:52,739 DEBUG SenderThread:3068 [sender.py:send_request():405] send_request: defer
+ 2024-05-23 08:01:52,739 INFO SenderThread:3068 [sender.py:send_request_defer():609] handle sender defer: 1
+ 2024-05-23 08:01:52,739 INFO SenderThread:3068 [sender.py:transition_state():613] send defer: 2
+ 2024-05-23 08:01:52,739 DEBUG HandlerThread:3068 [handler.py:handle_request():158] handle_request: defer
+ 2024-05-23 08:01:52,739 INFO HandlerThread:3068 [handler.py:handle_request_defer():184] handle defer: 2
+ 2024-05-23 08:01:52,739 INFO HandlerThread:3068 [system_monitor.py:finish():203] Stopping system monitor
+ 2024-05-23 08:01:52,739 DEBUG SystemMonitor:3068 [system_monitor.py:_start():172] Starting system metrics aggregation loop
+ 2024-05-23 08:01:52,739 DEBUG SystemMonitor:3068 [system_monitor.py:_start():179] Finished system metrics aggregation loop
+ 2024-05-23 08:01:52,739 DEBUG SystemMonitor:3068 [system_monitor.py:_start():183] Publishing last batch of metrics
+ 2024-05-23 08:01:52,742 INFO HandlerThread:3068 [interfaces.py:finish():200] Joined cpu monitor
+ 2024-05-23 08:01:52,742 INFO HandlerThread:3068 [interfaces.py:finish():200] Joined disk monitor
+ 2024-05-23 08:01:52,742 INFO HandlerThread:3068 [interfaces.py:finish():200] Joined memory monitor
+ 2024-05-23 08:01:52,742 INFO HandlerThread:3068 [interfaces.py:finish():200] Joined network monitor
+ 2024-05-23 08:01:52,743 DEBUG SenderThread:3068 [sender.py:send_request():405] send_request: defer
+ 2024-05-23 08:01:52,743 INFO SenderThread:3068 [sender.py:send_request_defer():609] handle sender defer: 2
+ 2024-05-23 08:01:52,743 INFO SenderThread:3068 [sender.py:transition_state():613] send defer: 3
+ 2024-05-23 08:01:52,743 DEBUG SenderThread:3068 [sender.py:send():378] send: stats
+ 2024-05-23 08:01:52,744 DEBUG HandlerThread:3068 [handler.py:handle_request():158] handle_request: defer
+ 2024-05-23 08:01:52,744 INFO HandlerThread:3068 [handler.py:handle_request_defer():184] handle defer: 3
+ 2024-05-23 08:01:52,744 DEBUG SenderThread:3068 [sender.py:send_request():405] send_request: defer
+ 2024-05-23 08:01:52,744 INFO SenderThread:3068 [sender.py:send_request_defer():609] handle sender defer: 3
+ 2024-05-23 08:01:52,744 INFO SenderThread:3068 [sender.py:transition_state():613] send defer: 4
+ 2024-05-23 08:01:52,744 DEBUG HandlerThread:3068 [handler.py:handle_request():158] handle_request: defer
+ 2024-05-23 08:01:52,744 INFO HandlerThread:3068 [handler.py:handle_request_defer():184] handle defer: 4
+ 2024-05-23 08:01:52,744 DEBUG SenderThread:3068 [sender.py:send_request():405] send_request: defer
+ 2024-05-23 08:01:52,744 INFO SenderThread:3068 [sender.py:send_request_defer():609] handle sender defer: 4
+ 2024-05-23 08:01:52,744 INFO SenderThread:3068 [sender.py:transition_state():613] send defer: 5
+ 2024-05-23 08:01:52,744 DEBUG HandlerThread:3068 [handler.py:handle_request():158] handle_request: defer
+ 2024-05-23 08:01:52,744 INFO HandlerThread:3068 [handler.py:handle_request_defer():184] handle defer: 5
+ 2024-05-23 08:01:52,745 DEBUG SenderThread:3068 [sender.py:send():378] send: summary
+ 2024-05-23 08:01:52,745 INFO SenderThread:3068 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end
+ 2024-05-23 08:01:52,746 DEBUG SenderThread:3068 [sender.py:send_request():405] send_request: defer
+ 2024-05-23 08:01:52,746 INFO SenderThread:3068 [sender.py:send_request_defer():609] handle sender defer: 5
+ 2024-05-23 08:01:52,746 INFO SenderThread:3068 [sender.py:transition_state():613] send defer: 6
+ 2024-05-23 08:01:52,746 DEBUG HandlerThread:3068 [handler.py:handle_request():158] handle_request: defer
+ 2024-05-23 08:01:52,746 INFO HandlerThread:3068 [handler.py:handle_request_defer():184] handle defer: 6
+ 2024-05-23 08:01:52,746 DEBUG SenderThread:3068 [sender.py:send_request():405] send_request: defer
+ 2024-05-23 08:01:52,746 INFO SenderThread:3068 [sender.py:send_request_defer():609] handle sender defer: 6
+ 2024-05-23 08:01:52,751 DEBUG HandlerThread:3068 [handler.py:handle_request():158] handle_request: status_report
+ 2024-05-23 08:01:52,826 INFO SenderThread:3068 [sender.py:transition_state():613] send defer: 7
+ 2024-05-23 08:01:52,826 DEBUG HandlerThread:3068 [handler.py:handle_request():158] handle_request: defer
+ 2024-05-23 08:01:52,826 INFO HandlerThread:3068 [handler.py:handle_request_defer():184] handle defer: 7
+ 2024-05-23 08:01:52,826 DEBUG SenderThread:3068 [sender.py:send_request():405] send_request: defer
+ 2024-05-23 08:01:52,826 INFO SenderThread:3068 [sender.py:send_request_defer():609] handle sender defer: 7
+ 2024-05-23 08:01:53,694 INFO Thread-12 :3068 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_080141-2cqsvyos/files/config.yaml
+ 2024-05-23 08:01:53,694 INFO Thread-12 :3068 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_080141-2cqsvyos/files/wandb-summary.json
+ 2024-05-23 08:01:53,736 DEBUG HandlerThread:3068 [handler.py:handle_request():158] handle_request: poll_exit
+ 2024-05-23 08:01:54,277 INFO SenderThread:3068 [sender.py:transition_state():613] send defer: 8
+ 2024-05-23 08:01:54,277 DEBUG SenderThread:3068 [sender.py:send_request():405] send_request: poll_exit
+ 2024-05-23 08:01:54,277 DEBUG HandlerThread:3068 [handler.py:handle_request():158] handle_request: defer
+ 2024-05-23 08:01:54,277 INFO HandlerThread:3068 [handler.py:handle_request_defer():184] handle defer: 8
+ 2024-05-23 08:01:54,277 DEBUG SenderThread:3068 [sender.py:send_request():405] send_request: defer
+ 2024-05-23 08:01:54,277 INFO SenderThread:3068 [sender.py:send_request_defer():609] handle sender defer: 8
+ 2024-05-23 08:01:54,277 INFO SenderThread:3068 [job_builder.py:build():432] Attempting to build job artifact
+ 2024-05-23 08:01:54,278 INFO SenderThread:3068 [job_builder.py:_get_source_type():576] no source found
+ 2024-05-23 08:01:54,278 INFO SenderThread:3068 [sender.py:transition_state():613] send defer: 9
+ 2024-05-23 08:01:54,278 DEBUG HandlerThread:3068 [handler.py:handle_request():158] handle_request: defer
+ 2024-05-23 08:01:54,278 INFO HandlerThread:3068 [handler.py:handle_request_defer():184] handle defer: 9
+ 2024-05-23 08:01:54,278 DEBUG SenderThread:3068 [sender.py:send_request():405] send_request: defer
+ 2024-05-23 08:01:54,278 INFO SenderThread:3068 [sender.py:send_request_defer():609] handle sender defer: 9
+ 2024-05-23 08:01:54,278 INFO SenderThread:3068 [dir_watcher.py:finish():358] shutting down directory watcher
+ 2024-05-23 08:01:54,695 INFO SenderThread:3068 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_080141-2cqsvyos/files/output.log
+ 2024-05-23 08:01:54,696 INFO SenderThread:3068 [dir_watcher.py:finish():388] scan: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_080141-2cqsvyos/files
+ 2024-05-23 08:01:54,696 INFO SenderThread:3068 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_080141-2cqsvyos/files/wandb-summary.json wandb-summary.json
+ 2024-05-23 08:01:54,696 INFO SenderThread:3068 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_080141-2cqsvyos/files/requirements.txt requirements.txt
+ 2024-05-23 08:01:54,698 INFO SenderThread:3068 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_080141-2cqsvyos/files/wandb-metadata.json wandb-metadata.json
+ 2024-05-23 08:01:54,700 INFO SenderThread:3068 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_080141-2cqsvyos/files/config.yaml config.yaml
+ 2024-05-23 08:01:54,701 INFO SenderThread:3068 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_080141-2cqsvyos/files/output.log output.log
+ 2024-05-23 08:01:54,701 INFO SenderThread:3068 [sender.py:transition_state():613] send defer: 10
+ 2024-05-23 08:01:54,701 DEBUG HandlerThread:3068 [handler.py:handle_request():158] handle_request: defer
+ 2024-05-23 08:01:54,701 INFO HandlerThread:3068 [handler.py:handle_request_defer():184] handle defer: 10
+ 2024-05-23 08:01:54,701 DEBUG SenderThread:3068 [sender.py:send_request():405] send_request: defer
+ 2024-05-23 08:01:54,701 INFO SenderThread:3068 [sender.py:send_request_defer():609] handle sender defer: 10
+ 2024-05-23 08:01:54,701 INFO SenderThread:3068 [file_pusher.py:finish():169] shutting down file pusher
+ 2024-05-23 08:01:54,736 DEBUG HandlerThread:3068 [handler.py:handle_request():158] handle_request: poll_exit
+ 2024-05-23 08:01:54,736 DEBUG SenderThread:3068 [sender.py:send_request():405] send_request: poll_exit
+ 2024-05-23 08:01:54,948 INFO wandb-upload_0:3068 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_080141-2cqsvyos/files/wandb-summary.json
+ 2024-05-23 08:01:55,376 INFO wandb-upload_2:3068 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_080141-2cqsvyos/files/config.yaml
+ 2024-05-23 08:01:55,502 INFO wandb-upload_1:3068 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_080141-2cqsvyos/files/requirements.txt
+ 2024-05-23 08:01:55,563 INFO wandb-upload_3:3068 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_080141-2cqsvyos/files/output.log
+ 2024-05-23 08:01:55,736 DEBUG HandlerThread:3068 [handler.py:handle_request():158] handle_request: poll_exit
+ 2024-05-23 08:01:55,737 DEBUG SenderThread:3068 [sender.py:send_request():405] send_request: poll_exit
+ 2024-05-23 08:01:55,763 INFO Thread-11 (_thread_body):3068 [sender.py:transition_state():613] send defer: 11
+ 2024-05-23 08:01:55,764 DEBUG HandlerThread:3068 [handler.py:handle_request():158] handle_request: defer
+ 2024-05-23 08:01:55,764 INFO HandlerThread:3068 [handler.py:handle_request_defer():184] handle defer: 11
+ 2024-05-23 08:01:55,764 DEBUG SenderThread:3068 [sender.py:send_request():405] send_request: defer
+ 2024-05-23 08:01:55,764 INFO SenderThread:3068 [sender.py:send_request_defer():609] handle sender defer: 11
+ 2024-05-23 08:01:55,764 INFO SenderThread:3068 [file_pusher.py:join():175] waiting for file pusher
+ 2024-05-23 08:01:55,764 INFO SenderThread:3068 [sender.py:transition_state():613] send defer: 12
+ 2024-05-23 08:01:55,764 DEBUG HandlerThread:3068 [handler.py:handle_request():158] handle_request: defer
+ 2024-05-23 08:01:55,764 INFO HandlerThread:3068 [handler.py:handle_request_defer():184] handle defer: 12
+ 2024-05-23 08:01:55,764 DEBUG SenderThread:3068 [sender.py:send_request():405] send_request: defer
+ 2024-05-23 08:01:55,764 INFO SenderThread:3068 [sender.py:send_request_defer():609] handle sender defer: 12
+ 2024-05-23 08:01:55,764 INFO SenderThread:3068 [file_stream.py:finish():601] file stream finish called
+ 2024-05-23 08:01:55,827 INFO SenderThread:3068 [file_stream.py:finish():605] file stream finish is done
+ 2024-05-23 08:01:55,827 INFO SenderThread:3068 [sender.py:transition_state():613] send defer: 13
+ 2024-05-23 08:01:55,827 DEBUG HandlerThread:3068 [handler.py:handle_request():158] handle_request: defer
+ 2024-05-23 08:01:55,827 INFO HandlerThread:3068 [handler.py:handle_request_defer():184] handle defer: 13
+ 2024-05-23 08:01:55,827 DEBUG SenderThread:3068 [sender.py:send_request():405] send_request: defer
+ 2024-05-23 08:01:55,827 INFO SenderThread:3068 [sender.py:send_request_defer():609] handle sender defer: 13
+ 2024-05-23 08:01:55,827 INFO SenderThread:3068 [sender.py:transition_state():613] send defer: 14
+ 2024-05-23 08:01:55,827 DEBUG HandlerThread:3068 [handler.py:handle_request():158] handle_request: defer
+ 2024-05-23 08:01:55,827 INFO HandlerThread:3068 [handler.py:handle_request_defer():184] handle defer: 14
+ 2024-05-23 08:01:55,827 DEBUG SenderThread:3068 [sender.py:send():378] send: final
+ 2024-05-23 08:01:55,827 DEBUG SenderThread:3068 [sender.py:send():378] send: footer
+ 2024-05-23 08:01:55,827 DEBUG SenderThread:3068 [sender.py:send_request():405] send_request: defer
+ 2024-05-23 08:01:55,828 INFO SenderThread:3068 [sender.py:send_request_defer():609] handle sender defer: 14
+ 2024-05-23 08:01:55,828 DEBUG HandlerThread:3068 [handler.py:handle_request():158] handle_request: poll_exit
+ 2024-05-23 08:01:55,828 DEBUG HandlerThread:3068 [handler.py:handle_request():158] handle_request: poll_exit
+ 2024-05-23 08:01:55,828 DEBUG HandlerThread:3068 [handler.py:handle_request():158] handle_request: server_info
+ 2024-05-23 08:01:55,828 DEBUG HandlerThread:3068 [handler.py:handle_request():158] handle_request: get_summary
+ 2024-05-23 08:01:55,828 DEBUG HandlerThread:3068 [handler.py:handle_request():158] handle_request: sampled_history
+ 2024-05-23 08:01:55,828 DEBUG HandlerThread:3068 [handler.py:handle_request():158] handle_request: internal_messages
+ 2024-05-23 08:01:55,829 DEBUG SenderThread:3068 [sender.py:send_request():405] send_request: poll_exit
+ 2024-05-23 08:01:55,829 DEBUG SenderThread:3068 [sender.py:send_request():405] send_request: poll_exit
+ 2024-05-23 08:01:55,829 DEBUG SenderThread:3068 [sender.py:send_request():405] send_request: server_info
+ 2024-05-23 08:01:55,884 INFO MainThread:3068 [wandb_run.py:_footer_history_summary_info():3994] rendering history
+ 2024-05-23 08:01:55,884 INFO MainThread:3068 [wandb_run.py:_footer_history_summary_info():4026] rendering summary
+ 2024-05-23 08:01:55,884 INFO MainThread:3068 [wandb_run.py:_footer_sync_info():3953] logging synced files
+ 2024-05-23 08:01:55,885 DEBUG HandlerThread:3068 [handler.py:handle_request():158] handle_request: shutdown
+ 2024-05-23 08:01:55,885 INFO HandlerThread:3068 [handler.py:finish():882] shutting down handler
+ 2024-05-23 08:01:56,829 INFO WriterThread:3068 [datastore.py:close():296] close: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_080141-2cqsvyos/run-2cqsvyos.wandb
+ 2024-05-23 08:01:56,884 INFO SenderThread:3068 [sender.py:finish():1545] shutting down sender
+ 2024-05-23 08:01:56,884 INFO SenderThread:3068 [file_pusher.py:finish():169] shutting down file pusher
+ 2024-05-23 08:01:56,884 INFO SenderThread:3068 [file_pusher.py:join():175] waiting for file pusher
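The git probe in the log above fails with exit code(128) because of git's dubious-ownership check, and the error message prints its own fix. A sketch of applying that fix programmatically before the run starts; the path and command are copied verbatim from the error text, and only the standard library is used:

    import subprocess

    # Command and path come straight from the error message logged above.
    # After this, `git rev-parse --show-toplevel` succeeds, so wandb can
    # record the git root instead of logging the exit code(128) error.
    repo = "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness"
    subprocess.run(
        ["git", "config", "--global", "--add", "safe.directory", repo],
        check=True,
    )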
lm-evaluation-harness/wandb/run-20240523_080141-2cqsvyos/logs/debug.log ADDED
@@ -0,0 +1,29 @@
+ 2024-05-23 08:01:41,412 INFO MainThread:2913 [wandb_setup.py:_flush():76] Current SDK version is 0.17.0
+ 2024-05-23 08:01:41,413 INFO MainThread:2913 [wandb_setup.py:_flush():76] Configure stats pid to 2913
+ 2024-05-23 08:01:41,413 INFO MainThread:2913 [wandb_setup.py:_flush():76] Loading settings from /root/.config/wandb/settings
+ 2024-05-23 08:01:41,413 INFO MainThread:2913 [wandb_setup.py:_flush():76] Loading settings from /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/settings
+ 2024-05-23 08:01:41,413 INFO MainThread:2913 [wandb_setup.py:_flush():76] Loading settings from environment variables: {}
+ 2024-05-23 08:01:41,413 INFO MainThread:2913 [wandb_setup.py:_flush():76] Applying setup settings: {'_disable_service': False}
+ 2024-05-23 08:01:41,413 WARNING MainThread:2913 [wandb_setup.py:_flush():76] Could not find program at -m lm_eval.__main__
+ 2024-05-23 08:01:41,413 INFO MainThread:2913 [wandb_setup.py:_flush():76] Inferring run settings from compute environment: {'program_relpath': None, 'program': '-m lm_eval.__main__'}
+ 2024-05-23 08:01:41,413 INFO MainThread:2913 [wandb_setup.py:_flush():76] Applying login settings: {}
+ 2024-05-23 08:01:41,413 INFO MainThread:2913 [wandb_init.py:_log_setup():520] Logging user logs to /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_080141-2cqsvyos/logs/debug.log
+ 2024-05-23 08:01:41,413 INFO MainThread:2913 [wandb_init.py:_log_setup():521] Logging internal logs to /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_080141-2cqsvyos/logs/debug-internal.log
+ 2024-05-23 08:01:41,413 INFO MainThread:2913 [wandb_init.py:init():560] calling init triggers
+ 2024-05-23 08:01:41,413 INFO MainThread:2913 [wandb_init.py:init():567] wandb.init called with sweep_config: {}
+ config: {}
+ 2024-05-23 08:01:41,413 INFO MainThread:2913 [wandb_init.py:init():610] starting backend
+ 2024-05-23 08:01:41,413 INFO MainThread:2913 [wandb_init.py:init():614] setting up manager
+ 2024-05-23 08:01:41,417 INFO MainThread:2913 [backend.py:_multiprocessing_setup():105] multiprocessing start_methods=fork,spawn,forkserver, using: spawn
+ 2024-05-23 08:01:41,418 INFO MainThread:2913 [wandb_init.py:init():622] backend started and connected
+ 2024-05-23 08:01:41,422 INFO MainThread:2913 [wandb_init.py:init():711] updated telemetry
+ 2024-05-23 08:01:41,430 INFO MainThread:2913 [wandb_init.py:init():744] communicating run to backend with 90.0 second timeout
+ 2024-05-23 08:01:41,687 INFO MainThread:2913 [wandb_run.py:_on_init():2396] communicating current version
+ 2024-05-23 08:01:41,800 INFO MainThread:2913 [wandb_run.py:_on_init():2405] got version response
+ 2024-05-23 08:01:41,800 INFO MainThread:2913 [wandb_init.py:init():795] starting run threads in backend
+ 2024-05-23 08:01:42,080 INFO MainThread:2913 [wandb_run.py:_console_start():2374] atexit reg
+ 2024-05-23 08:01:42,081 INFO MainThread:2913 [wandb_run.py:_redirect():2229] redirect: wrap_raw
+ 2024-05-23 08:01:42,081 INFO MainThread:2913 [wandb_run.py:_redirect():2294] Wrapping output streams.
+ 2024-05-23 08:01:42,081 INFO MainThread:2913 [wandb_run.py:_redirect():2319] Redirects installed.
+ 2024-05-23 08:01:42,090 INFO MainThread:2913 [wandb_init.py:init():838] run started, returning control to user process
+ 2024-05-23 08:01:56,886 WARNING MsgRouterThr:2913 [router.py:message_loop():77] message_loop has been closed
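debug.log above traces a single wandb.init() call from settings loading through "run started, returning control to user process". A minimal sketch of the user-side call that produces such a trace; the project and group values mirror the --wandb_args recorded in this run's wandb-metadata.json, while the surrounding script structure is an assumption:

    import wandb

    # Mirrors --wandb_args project=bharatgpt,group=trial_expt_2 from the run args.
    run = wandb.init(project="bharatgpt", group="trial_expt_2")
    # ... the evaluation itself would run here, while the Handler/Sender/Writer
    # threads seen in debug-internal.log stream files from the run directory ...
    run.finish()  # drives the defer 0..14 teardown sequence in debug-internal.log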
lm-evaluation-harness/wandb/run-20240523_080141-2cqsvyos/run-2cqsvyos.wandb ADDED
Binary file (11.1 kB).
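The run-2cqsvyos.wandb file above is wandb's binary transaction log for the run. If a run like this ever needs to be re-uploaded to the W&B server, the `wandb sync` CLI can replay the run directory; a sketch, with the directory path taken from this listing:

    import subprocess

    # `wandb sync <run-dir>` re-sends a recorded run from its local directory.
    run_dir = "lm-evaluation-harness/wandb/run-20240523_080141-2cqsvyos"
    subprocess.run(["wandb", "sync", run_dir], check=True)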
 
lm-evaluation-harness/wandb/run-20240608_111026-9apxn9eo/files/config.yaml ADDED
@@ -0,0 +1,375 @@
+ wandb_version: 1
+
+ _wandb:
+ desc: null
+ value:
+ python_version: 3.10.12
+ cli_version: 0.17.1
+ framework: huggingface
+ huggingface_version: 4.36.2
+ is_jupyter_run: false
+ is_kaggle_kernel: false
+ start_time: 1717845026
+ t:
+ 1:
+ - 1
+ - 5
+ - 11
+ - 49
+ - 51
+ - 53
+ - 55
+ - 71
+ - 98
+ - 100
+ 2:
+ - 1
+ - 5
+ - 11
+ - 49
+ - 51
+ - 53
+ - 55
+ - 71
+ - 98
+ - 100
+ 3:
+ - 2
+ - 13
+ - 23
+ - 62
+ 4: 3.10.12
+ 5: 0.17.1
+ 6: 4.36.2
+ 8:
+ - 5
+ 13: linux-x86_64
+ task_configs:
+ desc: null
+ value:
+ arc_easy:
+ task: arc_easy
+ group:
+ - ai2_arc
+ dataset_path: allenai/ai2_arc
+ dataset_name: ARC-Easy
+ training_split: train
+ validation_split: validation
+ test_split: test
+ doc_to_text: 'Question: {{question}}
+
+ Answer:'
+ doc_to_target: '{{choices.label.index(answerKey)}}'
+ doc_to_choice: '{{choices.text}}'
+ description: ''
+ target_delimiter: ' '
+ fewshot_delimiter: '
+
+
+ '
+ num_fewshot: 0
+ metric_list:
+ - metric: acc
+ aggregation: mean
+ higher_is_better: true
+ - metric: acc_norm
+ aggregation: mean
+ higher_is_better: true
+ output_type: multiple_choice
+ repeats: 1
+ should_decontaminate: true
+ doc_to_decontamination_query: 'Question: {{question}}
+
+ Answer:'
+ metadata:
+ version: 1.0
+ boolq:
+ task: boolq
+ group:
+ - super-glue-lm-eval-v1
+ dataset_path: super_glue
+ dataset_name: boolq
+ training_split: train
+ validation_split: validation
+ doc_to_text: '{{passage}}
+
+ Question: {{question}}?
+
+ Answer:'
+ doc_to_target: label
+ doc_to_choice:
+ - 'no'
+ - 'yes'
+ description: ''
+ target_delimiter: ' '
+ fewshot_delimiter: '
+
+
+ '
+ num_fewshot: 0
+ metric_list:
+ - metric: acc
+ output_type: multiple_choice
+ repeats: 1
+ should_decontaminate: true
+ doc_to_decontamination_query: passage
+ metadata:
+ version: 2.0
+ copa:
+ task: copa
+ group:
+ - super-glue-lm-eval-v1
+ dataset_path: super_glue
+ dataset_name: copa
+ training_split: train
+ validation_split: validation
+ doc_to_text: "def doc_to_text(doc):\n # Drop the period\n connector =\
+ \ {\n \"cause\": \"because\",\n \"effect\": \"therefore\",\n\
+ \ }[doc[\"question\"]]\n return doc[\"premise\"].strip()[:-1] + f\"\
+ \ {connector}\"\n"
+ doc_to_target: "def doc_to_target(doc):\n correct_choice = doc[\"choice1\"\
+ ] if doc[\"label\"] == 0 else doc[\"choice2\"]\n # Connect the sentences\n\
+ \ return \" \" + convert_choice(correct_choice)\n"
+ doc_to_choice: "def doc_to_choice(doc):\n return [\" \" + convert_choice(doc[\"\
+ choice1\"]), \" \" + convert_choice(doc[\"choice2\"])]\n"
+ description: ''
+ target_delimiter: ' '
+ fewshot_delimiter: '
+
+
+ '
+ num_fewshot: 0
+ metric_list:
+ - metric: acc
+ output_type: multiple_choice
+ repeats: 1
+ should_decontaminate: false
+ metadata:
+ version: 1.0
+ indic_arc_challenge_hi:
+ task: indic_arc_challenge_hi
+ group: Cognitive-Lab/Indic-ARC-Challenge
+ dataset_path: Cognitive-Lab/Indic-ARC-Challenge
+ dataset_name: hi
+ test_split: test
+ doc_to_text: 'Question: {{translated_question}}
+
+ Answer:'
+ doc_to_target: '{{translated_choices.label.index(answerKey)}}'
+ doc_to_choice: '{{translated_choices.text}}'
+ description: ''
+ target_delimiter: ' '
+ fewshot_delimiter: '
+
+
+ '
+ num_fewshot: 0
+ metric_list:
+ - metric: acc
+ aggregation: mean
+ higher_is_better: true
+ output_type: multiple_choice
+ repeats: 1
+ should_decontaminate: true
+ doc_to_decontamination_query: 'Question: {{translated_question}}
+
+ Answer:'
+ metadata:
+ version: 1.0
+ indic_arc_easy_hi:
+ task: indic_arc_easy_hi
+ group: Cognitive-Lab/Indic-ARC-Easy
+ dataset_path: Cognitive-Lab/Indic-ARC-Easy
+ dataset_name: hi
+ test_split: test
+ doc_to_text: 'Question: {{translated_question}}
+
+ Answer:'
+ doc_to_target: '{{translated_choices.label.index(answerKey)}}'
+ doc_to_choice: '{{translated_choices.text}}'
+ description: ''
+ target_delimiter: ' '
+ fewshot_delimiter: '
+
+
+ '
+ num_fewshot: 0
+ metric_list:
+ - metric: acc
+ aggregation: mean
+ higher_is_better: true
+ output_type: multiple_choice
+ repeats: 1
+ should_decontaminate: true
+ doc_to_decontamination_query: 'Question: {{translated_question}}
+
+ Answer:'
+ metadata:
+ version: 1.0
+ indic_boolq_hi:
+ task: indic_boolq_hi
+ group: Cognitive-Lab/Indic-BoolQ
+ dataset_path: Cognitive-Lab/Indic-BoolQ
+ dataset_name: hi
+ validation_split: validation
+ doc_to_text: 'Passage: {translated_passage}
+
+ Question: {translated_question.strip()}
+
+ Answer:'
+ doc_to_target: answer
+ doc_to_choice:
+ - 'true'
+ - 'false'
+ description: ''
+ target_delimiter: ' '
+ fewshot_delimiter: '
+
+
+ '
+ num_fewshot: 0
+ metric_list:
+ - metric: acc
+ aggregation: mean
+ higher_is_better: true
+ output_type: multiple_choice
+ repeats: 1
+ should_decontaminate: false
+ metadata:
+ version: 1.0
+ mrpc:
+ task: mrpc
+ group: glue
+ dataset_path: glue
+ dataset_name: mrpc
+ training_split: train
+ validation_split: validation
+ doc_to_text: 'Sentence 1: {{sentence1}}
+
+ Sentence 2: {{sentence2}}
+
+ Question: Do both sentences mean the same thing?
+
+ Answer:'
+ doc_to_target: label
+ doc_to_choice:
+ - 'no'
+ - 'yes'
+ description: ''
+ target_delimiter: ' '
+ fewshot_delimiter: '
+
+
+ '
+ num_fewshot: 0
+ metric_list:
+ - metric: acc
+ - metric: f1
+ output_type: multiple_choice
+ repeats: 1
+ should_decontaminate: false
+ metadata:
+ version: 1.0
+ piqa:
+ task: piqa
+ dataset_path: piqa
+ training_split: train
+ validation_split: validation
+ doc_to_text: 'Question: {{goal}}
+
+ Answer:'
+ doc_to_target: label
+ doc_to_choice: '{{[sol1, sol2]}}'
+ description: ''
+ target_delimiter: ' '
+ fewshot_delimiter: '
+
+
+ '
+ num_fewshot: 0
+ metric_list:
+ - metric: acc
+ aggregation: mean
+ higher_is_better: true
+ - metric: acc_norm
+ aggregation: mean
+ higher_is_better: true
+ output_type: multiple_choice
+ repeats: 1
+ should_decontaminate: true
+ doc_to_decontamination_query: goal
+ metadata:
+ version: 1.0
+ sst2:
+ task: sst2
+ group: glue
+ dataset_path: glue
+ dataset_name: sst2
+ training_split: train
+ validation_split: validation
+ doc_to_text: '{{sentence}}
+
+ Question: Is this sentence positive or negative?
+
+ Answer:'
+ doc_to_target: label
+ doc_to_choice:
+ - negative
+ - positive
+ description: ''
+ target_delimiter: ' '
+ fewshot_delimiter: '
+
+
+ '
+ num_fewshot: 0
+ metric_list:
+ - metric: acc
+ output_type: multiple_choice
+ repeats: 1
+ should_decontaminate: false
+ metadata:
+ version: 1.0
+ winogrande:
+ task: winogrande
+ dataset_path: winogrande
+ dataset_name: winogrande_xl
+ training_split: train
+ validation_split: validation
+ doc_to_text: "def doc_to_text(doc):\n answer_to_num = {\"1\": 0, \"2\": 1}\n\
+ \ return answer_to_num[doc[\"answer\"]]\n"
+ doc_to_target: "def doc_to_target(doc):\n idx = doc[\"sentence\"].index(\"\
+ _\") + 1\n return doc[\"sentence\"][idx:].strip()\n"
+ doc_to_choice: "def doc_to_choice(doc):\n idx = doc[\"sentence\"].index(\"\
+ _\")\n options = [doc[\"option1\"], doc[\"option2\"]]\n return [doc[\"\
+ sentence\"][:idx] + opt for opt in options]\n"
+ description: ''
+ target_delimiter: ' '
+ fewshot_delimiter: '
+
+
+ '
+ num_fewshot: 0
+ metric_list:
+ - metric: acc
+ aggregation: mean
+ higher_is_better: true
+ output_type: multiple_choice
+ repeats: 1
+ should_decontaminate: true
+ doc_to_decontamination_query: sentence
+ metadata:
+ version: 1.0
+ cli_configs:
+ desc: null
+ value:
+ model: hf
+ model_args: pretrained=/mnt/weka/peacock/experiments/llama/eval/checkpoint-enhibn-updated/llamav2-3b/hf/global_step100000,tokenizer=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k_hf/ConvertedTokenizer
+ batch_size: auto
+ batch_sizes:
+ - 64
+ device: null
+ use_cache: null
+ limit: null
+ bootstrap_iters: 100000
+ gen_kwargs: null
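The copa entry (and likewise winogrande) in the config above stores its prompt logic as Python source embedded in YAML strings. For clarity, here is the copa logic unescaped into runnable form. The three function bodies are verbatim from the config; convert_choice is defined elsewhere in the harness and is not shown in this listing, so the one-liner below, which lowercases the leading character so a choice reads as a clause, is an assumption:

    def convert_choice(choice):
        # Assumed helper (not in this listing): lowercase the first character.
        return choice[0].lower() + choice[1:]

    def doc_to_text(doc):
        # Drop the period and pick the connective for the question type.
        connector = {"cause": "because", "effect": "therefore"}[doc["question"]]
        return doc["premise"].strip()[:-1] + f" {connector}"

    def doc_to_target(doc):
        correct_choice = doc["choice1"] if doc["label"] == 0 else doc["choice2"]
        # Connect the sentences
        return " " + convert_choice(correct_choice)

    def doc_to_choice(doc):
        return [" " + convert_choice(doc["choice1"]), " " + convert_choice(doc["choice2"])]

    # Hypothetical COPA-style record, only to show the resulting prompt:
    doc = {"premise": "The man broke his toe.", "question": "cause",
           "choice1": "He got a hole in his sock.", "choice2": "He dropped a hammer on his foot."}
    print(doc_to_text(doc))  # -> "The man broke his toe because"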
lm-evaluation-harness/wandb/run-20240608_111026-9apxn9eo/files/media/table/evaluation/eval_results_1_6529e3311149275b8699.table.json ADDED
@@ -0,0 +1 @@
+ {"columns": ["Tasks", "Version", "Filter", "num_fewshot", "Metric", "Value", "Stderr"], "data": [["winogrande", 1.0, "none", 0, "acc", "0.48539857932123126", "0.0140"], ["sst2", 1.0, "none", 0, "acc", "0.5160550458715596", "0.0169"], ["piqa", 1.0, "none", 0, "acc", "0.5321001088139282", "0.0116"], ["piqa", 1.0, "none", 0, "acc_norm", "0.49347116430903154", "0.0117"], ["mrpc", 1.0, "none", 0, "acc", "0.3161764705882353", "0.0230"], ["mrpc", 1.0, "none", 0, "f1", "0.0", "0.0000"], ["indic_boolq_hi", 1.0, "none", 0, "acc", "0.6217125382262997", "0.0085"], ["indic_arc_easy_hi", 1.0, "none", 0, "acc", "0.25084175084175087", "0.0089"], ["indic_arc_challenge_hi", 1.0, "none", 0, "acc", "0.20733788395904437", "0.0118"], ["copa", 1.0, "none", 0, "acc", "0.58", "0.0496"], ["boolq", 2.0, "none", 0, "acc", "0.38073394495412843", "0.0085"], ["arc_easy", 1.0, "none", 0, "acc", "0.26346801346801346", "0.0090"], ["arc_easy", 1.0, "none", 0, "acc_norm", "0.2668350168350168", "0.0091"]]}
lm-evaluation-harness/wandb/run-20240608_111026-9apxn9eo/files/output.log ADDED
@@ -0,0 +1,805 @@
+
+ 2024-06-08:11:10:27,101 INFO [__main__.py:251] Verbosity set to INFO
+ 2024-06-08:11:10:36,720 INFO [__main__.py:335] Selected Tasks: ['arc_easy', 'boolq', 'copa', 'indic_arc_challenge_hi', 'indic_arc_easy_hi', 'indic_boolq_hi', 'mrpc', 'piqa', 'sst2', 'winogrande']
+ 2024-06-08:11:10:36,721 INFO [evaluator.py:131] Setting random seed to 0 | Setting numpy seed to 1234 | Setting torch manual seed to 1234
+ 2024-06-08:11:10:36,721 INFO [evaluator.py:177] Initializing hf model, with arguments: {'pretrained': '/mnt/weka/peacock/experiments/llama/eval/checkpoint-enhibn-updated/llamav2-3b/hf/global_step100000', 'tokenizer': '/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k_hf/ConvertedTokenizer'}
+ 2024-06-08:11:10:39,088 INFO [huggingface.py:164] Using device 'cuda'
+ /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/gpu_migration/torch/cuda/memory.py:36: UserWarning: No need to call empty_cache on HPU. It manages the memory internally in an effcient way.
+ warnings.warn(
+ Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
+ Downloading readme: 100%|██████████| 9.00k/9.00k [00:00<00:00, 16.1MB/s]
+ Downloading data: 100%|██████████| 331k/331k [00:00<00:00, 1.48MB/s]
+ Downloading data: 100%|██████████| 346k/346k [00:00<00:00, 2.71MB/s]
+ Downloading data: 100%|██████████| 86.1k/86.1k [00:00<00:00, 706kB/s]
+ Generating train split: 100%|██████████| 2251/2251 [00:00<00:00, 47691.92 examples/s]
+ Generating test split: 100%|██████████| 2376/2376 [00:00<00:00, 328325.58 examples/s]
+ Generating validation split: 100%|██████████| 570/570 [00:00<00:00, 154162.58 examples/s]
+ 2024-06-08:11:11:07,210 WARNING [task.py:763] [Task: boolq] metric acc is defined, but aggregation is not. using default aggregation=mean
+ 2024-06-08:11:11:07,211 WARNING [task.py:775] [Task: boolq] metric acc is defined, but higher_is_better is not. using default higher_is_better=True
+ /usr/local/lib/python3.10/dist-packages/datasets/load.py:1491: FutureWarning: The repository for super_glue contains custom code which must be executed to correctly load the dataset. You can inspect the repository content at https://hf.co/datasets/super_glue
+ You can avoid this message in future by passing the argument `trust_remote_code=True`.
+ Passing `trust_remote_code=True` will be mandatory to load this dataset from the next major release of `datasets`.
+ warnings.warn(
+ Downloading builder script: 100%|██████████| 30.7k/30.7k [00:00<00:00, 38.8MB/s]
+ Downloading readme: 100%|██████████| 18.2k/18.2k [00:00<00:00, 26.7MB/s]
+ Downloading data: 100%|██████████| 4.12M/4.12M [00:00<00:00, 25.5MB/s]
+ Generating train split: 100%|██████████| 9427/9427 [00:00<00:00, 22364.23 examples/s]
+ Generating validation split: 100%|██████████| 3270/3270 [00:00<00:00, 22504.25 examples/s]
+ Generating test split: 100%|██████████| 3245/3245 [00:00<00:00, 22795.47 examples/s]
+ 2024-06-08:11:11:10,503 WARNING [task.py:763] [Task: copa] metric acc is defined, but aggregation is not. using default aggregation=mean
+ 2024-06-08:11:11:10,504 WARNING [task.py:775] [Task: copa] metric acc is defined, but higher_is_better is not. using default higher_is_better=True
+ Downloading data: 100%|██████████| 44.0k/44.0k [00:00<00:00, 43.7MB/s]
+ Generating train split: 100%|██████████| 400/400 [00:00<00:00, 16554.72 examples/s]
+ Generating validation split: 100%|██████████| 100/100 [00:00<00:00, 13212.90 examples/s]
+ Generating test split: 100%|██████████| 500/500 [00:00<00:00, 16539.31 examples/s]
+ Downloading readme: 100%|██████████| 2.09k/2.09k [00:00<00:00, 5.02MB/s]
+ Downloading data: 100%|██████████| 1.73M/1.73M [00:00<00:00, 5.42MB/s]
+ Downloading data: 100%|██████████| 1.84M/1.84M [00:00<00:00, 5.89MB/s]
+ Downloading data: 100%|██████████| 473k/473k [00:00<00:00, 1.94MB/s]
+ Generating train split: 1119 examples [00:00, 29847.10 examples/s]
+ Generating test split: 1172 examples [00:00, 24003.26 examples/s]
+ Generating validation split: 299 examples [00:00, 9589.73 examples/s]
+ 2024-06-08:11:11:15,235 WARNING [task.py:322] [Task: indic_arc_challenge_hi] has_training_docs and has_validation_docs are False, using test_docs as fewshot_docs but this is not recommended.
+ 2024-06-08:11:11:15,236 WARNING [task.py:322] [Task: indic_arc_challenge_hi] has_training_docs and has_validation_docs are False, using test_docs as fewshot_docs but this is not recommended.
+ Downloading readme: 100%|██████████| 2.19k/2.19k [00:00<00:00, 12.7MB/s]
+ Downloading data: 100%|██████████| 3.18M/3.18M [00:00<00:00, 15.8MB/s]
+ Downloading data: 100%|██████████| 3.37M/3.37M [00:00<00:00, 7.79MB/s]
+ Downloading data: 100%|██████████| 808k/808k [00:00<00:00, 8.55MB/s]
+ Generating train split: 100%|██████████| 2251/2251 [00:00<00:00, 37212.97 examples/s]
+ Generating test split: 100%|██████████| 2376/2376 [00:00<00:00, 32758.20 examples/s]
+ 2024-06-08:11:11:18,419 WARNING [task.py:322] [Task: indic_arc_easy_hi] has_training_docs and has_validation_docs are False, using test_docs as fewshot_docs but this is not recommended.
51
+ 2024-06-08:11:11:18,419 WARNING [task.py:322] [Task: indic_arc_easy_hi] has_training_docs and has_validation_docs are False, using test_docs as fewshot_docs but this is not recommended.
52
+ 2024-06-08:11:11:18,419 WARNING [task.py:322] [Task: indic_arc_easy_hi] has_training_docs and has_validation_docs are False, using test_docs as fewshot_docs but this is not recommended.
53
+ Downloading readme: 100%|██████████| 1.53k/1.53k [00:00<00:00, 9.19MB/s]
54
+ Downloading data: 100%|██████████| 22.1M/22.1M [00:01<00:00, 18.4MB/s]
55
+ Downloading data: 100%|██████████| 7.55M/7.55M [00:00<00:00, 15.9MB/s]
56
+ Generating train split: 100%|██████████| 9427/9427 [00:00<00:00, 36151.38 examples/s]
57
+ Generating validation split: 100%|██████████| 3270/3270 [00:00<00:00, 35974.85 examples/s]
58
+ 2024-06-08:11:11:22,746 WARNING [task.py:763] [Task: mrpc] metric acc is defined, but aggregation is not. using default aggregation=mean
59
+ 2024-06-08:11:11:22,746 WARNING [task.py:775] [Task: mrpc] metric acc is defined, but higher_is_better is not. using default higher_is_better=True
60
+ 2024-06-08:11:11:22,746 WARNING [task.py:763] [Task: mrpc] metric f1 is defined, but aggregation is not. using default aggregation=f1
61
+ 2024-06-08:11:11:22,747 WARNING [task.py:775] [Task: mrpc] metric f1 is defined, but higher_is_better is not. using default higher_is_better=True
62
+ Downloading readme: 100%|██████████| 35.3k/35.3k [00:00<00:00, 63.6MB/s]
63
+ Downloading data: 100%|██████████| 649k/649k [00:00<00:00, 3.52MB/s]
64
+ Downloading data: 100%|██████████| 75.7k/75.7k [00:00<00:00, 527kB/s]
65
+ Downloading data: 100%|██████████| 308k/308k [00:00<00:00, 2.12MB/s]
66
+ Generating train split: 100%|██████████| 3668/3668 [00:00<00:00, 415140.91 examples/s]
67
+ Generating validation split: 100%|██████████| 408/408 [00:00<00:00, 181260.04 examples/s]
68
+ Generating test split: 100%|██████████| 1725/1725 [00:00<00:00, 387924.21 examples/s]
69
+ /usr/local/lib/python3.10/dist-packages/datasets/load.py:1491: FutureWarning: The repository for piqa contains custom code which must be executed to correctly load the dataset. You can inspect the repository content at https://hf.co/datasets/piqa
70
+ You can avoid this message in future by passing the argument `trust_remote_code=True`.
71
+ Passing `trust_remote_code=True` will be mandatory to load this dataset from the next major release of `datasets`.
72
+ warnings.warn(
73
+ Downloading builder script: 100%|██████████| 5.36k/5.36k [00:00<00:00, 22.5MB/s]
74
+ Downloading readme: 100%|██████████| 8.41k/8.41k [00:00<00:00, 31.5MB/s]
75
+ Downloading data: 100%|██████████| 1.82M/1.82M [00:00<00:00, 4.19MB/s]
76
+ Downloading data: 100%|██████████| 815k/815k [00:00<00:00, 53.8MB/s]
77
+ Generating train split: 100%|██████████| 16113/16113 [00:00<00:00, 23818.46 examples/s]
78
+ Generating test split: 100%|██████████| 3084/3084 [00:00<00:00, 24787.45 examples/s]
79
+ Generating validation split: 100%|██████████| 1838/1838 [00:00<00:00, 23782.31 examples/s]
80
+ 2024-06-08:11:11:32,824 WARNING [task.py:763] [Task: sst2] metric acc is defined, but aggregation is not. using default aggregation=mean
81
+ 2024-06-08:11:11:32,825 WARNING [task.py:775] [Task: sst2] metric acc is defined, but higher_is_better is not. using default higher_is_better=True
82
+ Downloading data: 100%|██████████| 3.11M/3.11M [00:00<00:00, 20.6MB/s]
83
+ Downloading data: 100%|██████████| 72.8k/72.8k [00:00<00:00, 508kB/s]
84
+ Downloading data: 100%|██████████| 148k/148k [00:00<00:00, 1.02MB/s]
85
+ Generating train split: 100%|██████████| 67349/67349 [00:00<00:00, 1414143.23 examples/s]
86
+ Generating validation split: 100%|██████████| 872/872 [00:00<00:00, 372827.02 examples/s]
87
+ Generating test split: 100%|██████████| 1821/1821 [00:00<00:00, 598998.32 examples/s]
88
+ /usr/local/lib/python3.10/dist-packages/datasets/load.py:1491: FutureWarning: The repository for winogrande contains custom code which must be executed to correctly load the dataset. You can inspect the repository content at https://hf.co/datasets/winogrande
89
+ You can avoid this message in future by passing the argument `trust_remote_code=True`.
90
+ Passing `trust_remote_code=True` will be mandatory to load this dataset from the next major release of `datasets`.
91
+ warnings.warn(
92
+ Downloading builder script: 100%|██████████| 5.65k/5.65k [00:00<00:00, 24.7MB/s]
93
+ Downloading readme: 100%|██████████| 9.97k/9.97k [00:00<00:00, 20.7MB/s]
94
+ Downloading data: 100%|██████████| 3.40M/3.40M [00:00<00:00, 6.98MB/s]
95
+ Generating train split: 100%|██████████| 40398/40398 [00:01<00:00, 23624.41 examples/s]
96
+ Generating test split: 100%|██████████| 1767/1767 [00:00<00:00, 24427.28 examples/s]
97
+ Generating validation split: 100%|██████████| 1267/1267 [00:00<00:00, 23893.31 examples/s]
98
+ 2024-06-08:11:11:45,062 INFO [task.py:395] Building contexts for winogrande on rank 0...
99
+ 100%|██████████| 1267/1267 [00:00<00:00, 68770.65it/s]
100
+ 2024-06-08:11:11:45,158 INFO [task.py:395] Building contexts for sst2 on rank 0...
101
+ 100%|██████████| 872/872 [00:00<00:00, 2509.76it/s]
102
+ 2024-06-08:11:11:45,533 INFO [task.py:395] Building contexts for piqa on rank 0...
103
+ 100%|██████████| 408/408 [00:00<00:00, 1840.66it/s]s]
104
+ 2024-06-08:11:11:47,339 INFO [task.py:395] Building contexts for mrpc on rank 0...
105
+ 100%|██████████| 408/408 [00:00<00:00, 1840.66it/s]s]
106
+ 2024-06-08:11:11:47,581 INFO [task.py:395] Building contexts for indic_boolq_hi on rank 0...
107
+ 100%|██████████| 3270/3270 [00:00<00:00, 3677.79it/s]
108
+ 2024-06-08:11:11:48,639 INFO [task.py:395] Building contexts for indic_arc_easy_hi on rank 0...
109
+ 100%|██████████| 2376/2376 [00:02<00:00, 1029.09it/s]
110
+ 2024-06-08:11:11:51,186 INFO [task.py:395] Building contexts for indic_arc_challenge_hi on rank 0...
111
+ 100%|██████████| 1172/1172 [00:01<00:00, 1125.50it/s]
112
+ 2024-06-08:11:11:52,347 INFO [task.py:395] Building contexts for copa on rank 0...
113
+ 100%|██████████| 100/100 [00:00<00:00, 62211.57it/s]
114
+ 2024-06-08:11:11:52,356 INFO [task.py:395] Building contexts for boolq on rank 0...
115
+ 100%|██████████| 3270/3270 [00:01<00:00, 1979.26it/s]
116
+ 2024-06-08:11:11:54,145 INFO [task.py:395] Building contexts for arc_easy on rank 0...
117
+ 24%|██▍ | 581/2376 [00:00<00:01, 1143.34it/s]
+ 100%|██████████| 3270/3270 [00:01<00:00, 1979.26it/s]
+ Passed argument batch_size = auto:1. Detecting largest batch size
+ Determined largest batch size: 64
[... several hundred identical tqdm progress lines ("100%|██████████| 3270/3270 [00:01<00:00, 1979.26it/s]") elided ...]
+ bootstrapping for stddev: f1_score
+ 100%|██████████| 3270/3270 [00:01<00:00, 1979.26it/s]
+ hf (pretrained=/mnt/weka/peacock/experiments/llama/eval/checkpoint-enhibn-updated/llamav2-3b/hf/global_step100000,tokenizer=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k_hf/ConvertedTokenizer), gen_kwargs: (None), limit: None, num_fewshot: None, batch_size: auto (64)
+ |        Tasks         |Version|Filter|n-shot| Metric |Value |   |Stderr|
+ |----------------------|------:|------|-----:|--------|-----:|---|-----:|
+ |winogrande            |      1|none  |     0|acc     |0.4854|±  |0.0140|
+ |sst2                  |      1|none  |     0|acc     |0.5161|±  |0.0169|
+ |piqa                  |      1|none  |     0|acc     |0.5321|±  |0.0116|
+ |                      |       |none  |     0|acc_norm|0.4935|±  |0.0117|
+ |mrpc                  |      1|none  |     0|acc     |0.3162|±  |0.0230|
+ |                      |       |none  |     0|f1      |0.0000|±  |0.0000|
+ |indic_boolq_hi        |      1|none  |     0|acc     |0.6217|±  |0.0085|
+ |indic_arc_easy_hi     |      1|none  |     0|acc     |0.2508|±  |0.0089|
+ |indic_arc_challenge_hi|      1|none  |     0|acc     |0.2073|±  |0.0118|
+ |copa                  |      1|none  |     0|acc     |0.5800|±  |0.0496|
+ |boolq                 |      2|none  |     0|acc     |0.3807|±  |0.0085|
+ |arc_easy              |      1|none  |     0|acc     |0.2635|±  |0.0090|
+ |                      |       |none  |     0|acc_norm|0.2668|±  |0.0091|
lm-evaluation-harness/wandb/run-20240608_111026-9apxn9eo/files/requirements.txt ADDED
@@ -0,0 +1,154 @@
+ DataProperty==1.0.1
+ GitPython==3.1.43
+ Jinja2==3.1.4
+ Markdown==3.6
+ MarkupSafe==2.1.5
+ Pillow-SIMD==7.0.0.post3
+ PyYAML==6.0
+ Werkzeug==3.0.3
+ absl-py==2.1.0
+ accelerate==0.31.0
+ aiohttp==3.9.5
+ aiosignal==1.3.1
+ async-timeout==4.0.3
+ attrs==23.2.0
+ av==9.2.0
+ cachetools==5.3.3
+ certifi==2024.2.2
+ cffi==1.15.1
+ cfgv==3.4.0
+ chardet==5.2.0
+ charset-normalizer==3.3.2
+ click==8.1.7
+ cmake==3.29.2
+ colorama==0.4.6
+ datasets==2.19.2
+ deepspeed==0.12.4+hpu.synapse.v1.15.1
+ dill==0.3.8
+ distlib==0.3.8
+ docker-pycreds==0.4.0
+ einops==0.8.0
+ evaluate==0.4.2
+ exceptiongroup==1.2.1
+ expecttest==0.2.1
+ filelock==3.14.0
+ frozenlist==1.4.1
+ fsspec==2024.3.1
+ gitdb==4.0.11
+ google-auth-oauthlib==0.4.6
+ google-auth==2.29.0
+ grpcio==1.63.0
+ habana-media-loader==1.15.1.15
+ habana-pyhlml==1.15.1.15
+ habana-torch-dataloader==1.15.1.15
+ habana-torch-plugin==1.15.1.15
+ habana_gpu_migration==1.15.1.15
+ habana_quantization_toolkit==1.15.1.15
+ hjson==3.1.0
+ huggingface-hub==0.23.3
+ identify==2.5.36
+ idna==3.7
+ iniconfig==2.0.0
+ joblib==1.4.2
+ jsonlines==4.0.0
+ lightning-habana==1.4.0
+ lightning-utilities==0.11.2
+ lightning==2.2.0.post0
+ lm_eval==0.4.2
+ lm_eval==0.4.2
+ lm_eval==0.4.2
+ lxml==5.2.2
+ mbstrdecoder==1.1.3
+ more-itertools==10.2.0
+ mpi4py==3.1.4
+ mpmath==1.3.0
+ multidict==6.0.5
+ multiprocess==0.70.16
+ networkx==3.3
+ ninja==1.11.1.1
+ nltk==3.8.1
+ nodeenv==1.8.0
+ numexpr==2.10.0
+ numpy==1.23.5
+ oauthlib==3.2.2
+ packaging==24.0
+ pandas==2.0.1
+ pathspec==0.12.1
+ pathvalidate==3.2.0
+ peft==0.11.1
+ perfetto==0.7.0
+ pip==22.0.2
+ pip==23.3.1
+ platformdirs==4.2.1
+ pluggy==1.5.0
+ portalocker==2.8.2
+ pre-commit==3.3.3
+ pretty-errors==1.2.25
+ protobuf==3.20.3
+ psutil==5.9.8
+ py-cpuinfo==9.0.0
+ pyarrow-hotfix==0.6
+ pyarrow==16.1.0
+ pyasn1==0.6.0
+ pyasn1_modules==0.4.0
+ pybind11==2.10.4
+ pycparser==2.22
+ pydantic==1.10.13
+ pynvml==8.0.4
+ pytablewriter==1.2.0
+ pytest==8.2.0
+ python-dateutil==2.9.0.post0
+ pytorch-lightning==2.2.4
+ pytz==2024.1
+ regex==2023.5.5
+ requests-oauthlib==2.0.0
+ requests==2.32.3
+ rouge_score==0.1.2
+ rsa==4.9
+ sacrebleu==2.4.2
+ safetensors==0.4.3
+ scikit-learn==1.5.0
+ scipy==1.13.1
+ sentencepiece==0.2.0
+ sentry-sdk==2.5.1
+ setproctitle==1.3.3
+ setuptools==59.6.0
+ setuptools==69.5.1
+ six==1.16.0
+ smmap==5.0.1
+ sqlitedict==2.1.0
+ symengine==0.11.0
+ sympy==1.12
+ tabledata==1.3.3
+ tabulate==0.9.0
+ tcolorpy==0.1.6
+ tdqm==0.0.1
+ tensorboard-data-server==0.6.1
+ tensorboard-plugin-wit==1.8.1
+ tensorboard==2.11.2
+ threadpoolctl==3.5.0
+ tokenizers==0.15.2
+ tomli==2.0.1
+ torch==2.2.0a0+git8964477
+ torch_tb_profiler==0.4.0
+ torchaudio==2.2.0+08901ad
+ torchdata==0.7.1+5e6f7b7
+ torchmetrics==1.4.0
+ torchtext==0.17.0+400da5c
+ torchvision==0.17.0+b2383d4
+ tqdm-multiprocess==0.0.11
+ tqdm==4.66.4
+ transformers==4.36.2
+ typepy==1.3.2
+ typing_extensions==4.11.0
+ tzdata==2024.1
+ urllib3==1.26.18
+ virtualenv==20.26.1
+ wandb==0.17.1
+ wheel==0.37.1
+ wheel==0.43.0
+ word2number==1.1
+ xxhash==3.4.1
+ yamllint==1.35.1
+ yarl==1.9.4
+ zstandard==0.22.0
lm-evaluation-harness/wandb/run-20240608_111026-9apxn9eo/files/wandb-metadata.json ADDED
@@ -0,0 +1,850 @@
+ {
+     "os": "Linux-5.15.0-92-generic-x86_64-with-glibc2.35",
+     "python": "3.10.12",
+     "heartbeatAt": "2024-06-08T11:10:26.878917",
+     "startedAt": "2024-06-08T11:10:26.443305",
+     "docker": null,
+     "cuda": null,
+     "args": [
+         "--model",
+         "hf",
+         "--model_args",
+         "pretrained=/mnt/weka/peacock/experiments/llama/eval/checkpoint-enhibn-updated/llamav2-3b/hf/global_step100000,tokenizer=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k_hf/ConvertedTokenizer",
+         "--tasks",
+         "winogrande,sst2,mrpc,arc_easy,copa,piqa,boolq,indic_arc_easy_hi,indic_arc_challenge_hi,indic_boolq_hi",
+         "--batch_size",
+         "auto",
+         "--wandb_args",
+         "project=engl-hi-eval,name=globalstep_100000,group=exp2"
+     ],
+     "state": "running",
+     "program": "-m lm_eval.__main__",
+     "codePathLocal": null,
+     "git": {
+         "remote": "https://github.com/EleutherAI/lm-evaluation-harness",
+         "commit": null
+     },
+     "email": null,
+     "root": "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness",
+     "host": "peacock-evaluation-debug-worker-0",
+     "username": "root",
+     "executable": "/usr/bin/python3",
+     "cpu_count": 80,
+     "cpu_count_logical": 160,
+     "cpu_freq": {
+         "current": 2334.37498125,
+         "min": 800.0,
+         "max": 3400.0
+     },
+     "cpu_freq_per_core": [
+         {"current": 3400.0, "min": 800.0, "max": 3400.0},
+         {"current": 3400.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 3400.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 3400.002, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0}
+     ],
+     "disk": {
+         "/": {
+             "total": 877.6341285705566,
+             "used": 214.5283966064453
+         }
+     },
+     "memory": {
+         "total": 1007.4379501342773
+     }
+ }
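The `cpu_freq_per_core` array above carries one object per logical core (160 in total), almost all idling at 2300 MHz with a few boosted to 3400 MHz. A minimal sketch, assuming the file is read from disk as `wandb-metadata.json`, that condenses the array into the frequency histogram it effectively encodes:

```python
# Sketch: summarize the per-core frequency telemetry from wandb-metadata.json.
import json
from collections import Counter

with open("wandb-metadata.json") as fh:  # assumed local path
    meta = json.load(fh)

# Count how many cores report each instantaneous frequency.
counts = Counter(core["current"] for core in meta["cpu_freq_per_core"])
for freq, n in sorted(counts.items()):
    print(f"{n:4d} cores @ {freq} MHz")
```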
lm-evaluation-harness/wandb/run-20240608_111026-9apxn9eo/files/wandb-summary.json ADDED
@@ -0,0 +1 @@
+ {"winogrande/alias": "winogrande", "sst2/alias": "sst2", "piqa/alias": "piqa", "mrpc/alias": "mrpc", "indic_boolq_hi/alias": "indic_boolq_hi", "indic_arc_easy_hi/alias": "indic_arc_easy_hi", "indic_arc_challenge_hi/alias": "indic_arc_challenge_hi", "copa/alias": "copa", "boolq/alias": "boolq", "arc_easy/alias": "arc_easy", "winogrande/acc": 0.48539857932123126, "winogrande/acc_stderr": 0.014046492383275837, "sst2/acc": 0.5160550458715596, "sst2/acc_stderr": 0.01693311741999099, "piqa/acc": 0.5321001088139282, "piqa/acc_stderr": 0.01164175801482013, "piqa/acc_norm": 0.49347116430903154, "piqa/acc_norm_stderr": 0.01166482959521097, "mrpc/acc": 0.3161764705882353, "mrpc/acc_stderr": 0.023048336668420193, "mrpc/f1": 0.0, "mrpc/f1_stderr": 0.0, "indic_boolq_hi/acc": 0.6217125382262997, "indic_boolq_hi/acc_stderr": 0.008482001133930994, "indic_arc_easy_hi/acc": 0.25084175084175087, "indic_arc_easy_hi/acc_stderr": 0.008895183010487388, "indic_arc_challenge_hi/acc": 0.20733788395904437, "indic_arc_challenge_hi/acc_stderr": 0.011846905782971373, "copa/acc": 0.58, "copa/acc_stderr": 0.049604496374885836, "boolq/acc": 0.38073394495412843, "boolq/acc_stderr": 0.008492625561656218, "arc_easy/acc": 0.26346801346801346, "arc_easy/acc_stderr": 0.009039157374497722, "arc_easy/acc_norm": 0.2668350168350168, "arc_easy/acc_norm_stderr": 0.00907591585926726, "_timestamp": 1717849398.984251, "_runtime": 4372.5210790634155, "_step": 1, "evaluation/eval_results": {"_type": "table-file", "sha256": "6529e3311149275b8699b4a67df73b70fcae908dd4f7d4af4f1c71c1b876ceba", "size": 984, "artifact_path": "wandb-client-artifact://zo4fjo3s3gxn5pzewdndrodktw0wj3vlmo930nos5bjauxgdso83b1v093mu7m3sjuc5l6ifeylay3tou90n1h393ka4vaikp9fzx5ch4qy9hs46r1kolxfyzeg1spz2/evaluation/eval_results.table.json", "_latest_artifact_path": "wandb-client-artifact://c0hwnhshjspl5o1obcimj38jhq5xicsi4rv04t2hvsil0gtkz1fygm2k2h673hytf5f073wg35atyzrch6c7p9zvw230orttwtjxy8zum5c2znzs56jehwecqu6nxcuj:latest/evaluation/eval_results.table.json", "path": "media/table/evaluation/eval_results_1_6529e3311149275b8699.table.json", "ncols": 7, "nrows": 13}, "_wandb": {"runtime": 4374}}
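The one-line summary above packs every final metric into a single JSON object, keyed as `<task>/<metric>`. A small sketch (assumed local filename `wandb-summary.json`) that tabulates the per-task accuracy with its standard error:

```python
# Sketch: print a task/accuracy table from the flat wandb-summary.json keys.
import json

with open("wandb-summary.json") as fh:  # assumed local path
    summary = json.load(fh)

for key in sorted(summary):
    if key.endswith("/acc"):
        task = key.rsplit("/", 1)[0]
        stderr = summary.get(f"{task}/acc_stderr", float("nan"))
        print(f"{task:25s} acc={summary[key]:.4f} ± {stderr:.4f}")
```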
lm-evaluation-harness/wandb/run-20240608_111026-9apxn9eo/logs/debug-internal.log ADDED
The diff for this file is too large to render. See raw diff
 
lm-evaluation-harness/wandb/run-20240608_111026-9apxn9eo/logs/debug.log ADDED
@@ -0,0 +1,36 @@
+ 2024-06-08 11:10:26,457 INFO MainThread:833 [wandb_setup.py:_flush():76] Current SDK version is 0.17.1
+ 2024-06-08 11:10:26,457 INFO MainThread:833 [wandb_setup.py:_flush():76] Configure stats pid to 833
+ 2024-06-08 11:10:26,457 INFO MainThread:833 [wandb_setup.py:_flush():76] Loading settings from /root/.config/wandb/settings
+ 2024-06-08 11:10:26,457 INFO MainThread:833 [wandb_setup.py:_flush():76] Loading settings from /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/settings
+ 2024-06-08 11:10:26,458 INFO MainThread:833 [wandb_setup.py:_flush():76] Loading settings from environment variables: {}
+ 2024-06-08 11:10:26,458 INFO MainThread:833 [wandb_setup.py:_flush():76] Applying setup settings: {'_disable_service': False}
+ 2024-06-08 11:10:26,458 WARNING MainThread:833 [wandb_setup.py:_flush():76] Could not find program at -m lm_eval.__main__
+ 2024-06-08 11:10:26,458 INFO MainThread:833 [wandb_setup.py:_flush():76] Inferring run settings from compute environment: {'program_relpath': None, 'program': '-m lm_eval.__main__'}
+ 2024-06-08 11:10:26,458 INFO MainThread:833 [wandb_setup.py:_flush():76] Applying login settings: {}
+ 2024-06-08 11:10:26,458 INFO MainThread:833 [wandb_init.py:_log_setup():520] Logging user logs to /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240608_111026-9apxn9eo/logs/debug.log
+ 2024-06-08 11:10:26,458 INFO MainThread:833 [wandb_init.py:_log_setup():521] Logging internal logs to /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240608_111026-9apxn9eo/logs/debug-internal.log
+ 2024-06-08 11:10:26,458 INFO MainThread:833 [wandb_init.py:init():560] calling init triggers
+ 2024-06-08 11:10:26,458 INFO MainThread:833 [wandb_init.py:init():567] wandb.init called with sweep_config: {}
+ config: {}
+ 2024-06-08 11:10:26,458 INFO MainThread:833 [wandb_init.py:init():610] starting backend
+ 2024-06-08 11:10:26,458 INFO MainThread:833 [wandb_init.py:init():614] setting up manager
+ 2024-06-08 11:10:26,461 INFO MainThread:833 [backend.py:_multiprocessing_setup():105] multiprocessing start_methods=fork,spawn,forkserver, using: spawn
+ 2024-06-08 11:10:26,462 INFO MainThread:833 [wandb_init.py:init():622] backend started and connected
+ 2024-06-08 11:10:26,466 INFO MainThread:833 [wandb_init.py:init():711] updated telemetry
+ 2024-06-08 11:10:26,475 INFO MainThread:833 [wandb_init.py:init():744] communicating run to backend with 90.0 second timeout
+ 2024-06-08 11:10:26,724 INFO MainThread:833 [wandb_run.py:_on_init():2402] communicating current version
+ 2024-06-08 11:10:26,754 INFO MainThread:833 [wandb_run.py:_on_init():2411] got version response
+ 2024-06-08 11:10:26,754 INFO MainThread:833 [wandb_init.py:init():795] starting run threads in backend
+ 2024-06-08 11:10:27,095 INFO MainThread:833 [wandb_run.py:_console_start():2380] atexit reg
+ 2024-06-08 11:10:27,095 INFO MainThread:833 [wandb_run.py:_redirect():2235] redirect: wrap_raw
+ 2024-06-08 11:10:27,095 INFO MainThread:833 [wandb_run.py:_redirect():2300] Wrapping output streams.
+ 2024-06-08 11:10:27,095 INFO MainThread:833 [wandb_run.py:_redirect():2325] Redirects installed.
+ 2024-06-08 11:10:27,099 INFO MainThread:833 [wandb_init.py:init():838] run started, returning control to user process
+ 2024-06-08 12:23:18,575 INFO MainThread:833 [wandb_run.py:_config_callback():1382] config_cb None None {'task_configs': {'arc_easy': {'task': 'arc_easy', 'group': ['ai2_arc'], 'dataset_path': 'allenai/ai2_arc', 'dataset_name': 'ARC-Easy', 'training_split': 'train', 'validation_split': 'validation', 'test_split': 'test', 'doc_to_text': 'Question: {{question}}\nAnswer:', 'doc_to_target': '{{choices.label.index(answerKey)}}', 'doc_to_choice': '{{choices.text}}', 'description': '', 'target_delimiter': ' ', 'fewshot_delimiter': '\n\n', 'num_fewshot': 0, 'metric_list': [{'metric': 'acc', 'aggregation': 'mean', 'higher_is_better': True}, {'metric': 'acc_norm', 'aggregation': 'mean', 'higher_is_better': True}], 'output_type': 'multiple_choice', 'repeats': 1, 'should_decontaminate': True, 'doc_to_decontamination_query': 'Question: {{question}}\nAnswer:', 'metadata': {'version': 1.0}}, 'boolq': {'task': 'boolq', 'group': ['super-glue-lm-eval-v1'], 'dataset_path': 'super_glue', 'dataset_name': 'boolq', 'training_split': 'train', 'validation_split': 'validation', 'doc_to_text': '{{passage}}\nQuestion: {{question}}?\nAnswer:', 'doc_to_target': 'label', 'doc_to_choice': ['no', 'yes'], 'description': '', 'target_delimiter': ' ', 'fewshot_delimiter': '\n\n', 'num_fewshot': 0, 'metric_list': [{'metric': 'acc'}], 'output_type': 'multiple_choice', 'repeats': 1, 'should_decontaminate': True, 'doc_to_decontamination_query': 'passage', 'metadata': {'version': 2.0}}, 'copa': {'task': 'copa', 'group': ['super-glue-lm-eval-v1'], 'dataset_path': 'super_glue', 'dataset_name': 'copa', 'training_split': 'train', 'validation_split': 'validation', 'doc_to_text': 'def doc_to_text(doc):\n # Drop the period\n connector = {\n "cause": "because",\n "effect": "therefore",\n }[doc["question"]]\n return doc["premise"].strip()[:-1] + f" {connector}"\n', 'doc_to_target': 'def doc_to_target(doc):\n correct_choice = doc["choice1"] if doc["label"] == 0 else doc["choice2"]\n # Connect the sentences\n return " " + convert_choice(correct_choice)\n', 'doc_to_choice': 'def doc_to_choice(doc):\n return [" " + convert_choice(doc["choice1"]), " " + convert_choice(doc["choice2"])]\n', 'description': '', 'target_delimiter': ' ', 'fewshot_delimiter': '\n\n', 'num_fewshot': 0, 'metric_list': [{'metric': 'acc'}], 'output_type': 'multiple_choice', 'repeats': 1, 'should_decontaminate': False, 'metadata': {'version': 1.0}}, 'indic_arc_challenge_hi': {'task': 'indic_arc_challenge_hi', 'group': 'Cognitive-Lab/Indic-ARC-Challenge', 'dataset_path': 'Cognitive-Lab/Indic-ARC-Challenge', 'dataset_name': 'hi', 'test_split': 'test', 'doc_to_text': 'Question: {{translated_question}}\nAnswer:', 'doc_to_target': '{{translated_choices.label.index(answerKey)}}', 'doc_to_choice': '{{translated_choices.text}}', 'description': '', 'target_delimiter': ' ', 'fewshot_delimiter': '\n\n', 'num_fewshot': 0, 'metric_list': [{'metric': 'acc', 'aggregation': 'mean', 'higher_is_better': True}], 'output_type': 'multiple_choice', 'repeats': 1, 'should_decontaminate': True, 'doc_to_decontamination_query': 'Question: {{translated_question}}\nAnswer:', 'metadata': {'version': 1.0}}, 'indic_arc_easy_hi': {'task': 'indic_arc_easy_hi', 'group': 'Cognitive-Lab/Indic-ARC-Easy', 'dataset_path': 'Cognitive-Lab/Indic-ARC-Easy', 'dataset_name': 'hi', 'test_split': 'test', 'doc_to_text': 'Question: {{translated_question}}\nAnswer:', 'doc_to_target': '{{translated_choices.label.index(answerKey)}}', 'doc_to_choice': '{{translated_choices.text}}', 'description': '', 'target_delimiter': ' ', 
'fewshot_delimiter': '\n\n', 'num_fewshot': 0, 'metric_list': [{'metric': 'acc', 'aggregation': 'mean', 'higher_is_better': True}], 'output_type': 'multiple_choice', 'repeats': 1, 'should_decontaminate': True, 'doc_to_decontamination_query': 'Question: {{translated_question}}\nAnswer:', 'metadata': {'version': 1.0}}, 'indic_boolq_hi': {'task': 'indic_boolq_hi', 'group': 'Cognitive-Lab/Indic-BoolQ', 'dataset_path': 'Cognitive-Lab/Indic-BoolQ', 'dataset_name': 'hi', 'validation_split': 'validation', 'doc_to_text': 'Passage: {translated_passage}\nQuestion: {translated_question.strip()}\nAnswer:', 'doc_to_target': 'answer', 'doc_to_choice': ['true', 'false'], 'description': '', 'target_delimiter': ' ', 'fewshot_delimiter': '\n\n', 'num_fewshot': 0, 'metric_list': [{'metric': 'acc', 'aggregation': 'mean', 'higher_is_better': True}], 'output_type': 'multiple_choice', 'repeats': 1, 'should_decontaminate': False, 'metadata': {'version': 1.0}}, 'mrpc': {'task': 'mrpc', 'group': 'glue', 'dataset_path': 'glue', 'dataset_name': 'mrpc', 'training_split': 'train', 'validation_split': 'validation', 'doc_to_text': 'Sentence 1: {{sentence1}}\nSentence 2: {{sentence2}}\nQuestion: Do both sentences mean the same thing?\nAnswer:', 'doc_to_target': 'label', 'doc_to_choice': ['no', 'yes'], 'description': '', 'target_delimiter': ' ', 'fewshot_delimiter': '\n\n', 'num_fewshot': 0, 'metric_list': [{'metric': 'acc'}, {'metric': 'f1'}], 'output_type': 'multiple_choice', 'repeats': 1, 'should_decontaminate': False, 'metadata': {'version': 1.0}}, 'piqa': {'task': 'piqa', 'dataset_path': 'piqa', 'training_split': 'train', 'validation_split': 'validation', 'doc_to_text': 'Question: {{goal}}\nAnswer:', 'doc_to_target': 'label', 'doc_to_choice': '{{[sol1, sol2]}}', 'description': '', 'target_delimiter': ' ', 'fewshot_delimiter': '\n\n', 'num_fewshot': 0, 'metric_list': [{'metric': 'acc', 'aggregation': 'mean', 'higher_is_better': True}, {'metric': 'acc_norm', 'aggregation': 'mean', 'higher_is_better': True}], 'output_type': 'multiple_choice', 'repeats': 1, 'should_decontaminate': True, 'doc_to_decontamination_query': 'goal', 'metadata': {'version': 1.0}}, 'sst2': {'task': 'sst2', 'group': 'glue', 'dataset_path': 'glue', 'dataset_name': 'sst2', 'training_split': 'train', 'validation_split': 'validation', 'doc_to_text': '{{sentence}}\nQuestion: Is this sentence positive or negative?\nAnswer:', 'doc_to_target': 'label', 'doc_to_choice': ['negative', 'positive'], 'description': '', 'target_delimiter': ' ', 'fewshot_delimiter': '\n\n', 'num_fewshot': 0, 'metric_list': [{'metric': 'acc'}], 'output_type': 'multiple_choice', 'repeats': 1, 'should_decontaminate': False, 'metadata': {'version': 1.0}}, 'winogrande': {'task': 'winogrande', 'dataset_path': 'winogrande', 'dataset_name': 'winogrande_xl', 'training_split': 'train', 'validation_split': 'validation', 'doc_to_text': 'def doc_to_text(doc):\n answer_to_num = {"1": 0, "2": 1}\n return answer_to_num[doc["answer"]]\n', 'doc_to_target': 'def doc_to_target(doc):\n idx = doc["sentence"].index("_") + 1\n return doc["sentence"][idx:].strip()\n', 'doc_to_choice': 'def doc_to_choice(doc):\n idx = doc["sentence"].index("_")\n options = [doc["option1"], doc["option2"]]\n return [doc["sentence"][:idx] + opt for opt in options]\n', 'description': '', 'target_delimiter': ' ', 'fewshot_delimiter': '\n\n', 'num_fewshot': 0, 'metric_list': [{'metric': 'acc', 'aggregation': 'mean', 'higher_is_better': True}], 'output_type': 'multiple_choice', 'repeats': 1, 'should_decontaminate': True, 
'doc_to_decontamination_query': 'sentence', 'metadata': {'version': 1.0}}}, 'cli_configs': {'model': 'hf', 'model_args': 'pretrained=/mnt/weka/peacock/experiments/llama/eval/checkpoint-enhibn-updated/llamav2-3b/hf/global_step100000,tokenizer=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k_hf/ConvertedTokenizer', 'batch_size': 'auto', 'batch_sizes': [64], 'device': None, 'use_cache': None, 'limit': None, 'bootstrap_iters': 100000, 'gen_kwargs': None}}
+ 2024-06-08 12:23:19,219 INFO MainThread:833 [wandb_run.py:_finish():2109] finishing run smlgenai/engl-hi-eval/9apxn9eo
+ 2024-06-08 12:23:19,219 INFO MainThread:833 [wandb_run.py:_atexit_cleanup():2349] got exitcode: 0
+ 2024-06-08 12:23:19,220 INFO MainThread:833 [wandb_run.py:_restore():2332] restore
+ 2024-06-08 12:23:19,220 INFO MainThread:833 [wandb_run.py:_restore():2338] restore done
+ 2024-06-08 12:23:24,834 INFO MainThread:833 [wandb_run.py:_footer_history_summary_info():4008] rendering history
+ 2024-06-08 12:23:24,835 INFO MainThread:833 [wandb_run.py:_footer_history_summary_info():4040] rendering summary
+ 2024-06-08 12:23:24,841 INFO MainThread:833 [wandb_run.py:_footer_sync_info():3967] logging synced files
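The log confirms this run was launched as `-m lm_eval.__main__` with the arguments recorded in wandb-metadata.json. For reference, a hedged sketch of the roughly equivalent programmatic call through the harness's `simple_evaluate` entry point (v0.4-era API; argument names may differ across harness versions, so treat this as an approximation rather than the exact invocation used here):

```python
# Sketch: programmatic equivalent of the CLI run recorded in this log.
import lm_eval

results = lm_eval.simple_evaluate(
    model="hf",
    model_args=(
        "pretrained=/mnt/weka/peacock/experiments/llama/eval/"
        "checkpoint-enhibn-updated/llamav2-3b/hf/global_step100000,"
        "tokenizer=/mnt/weka/peacock/tokenization/trained-tokenizer/"
        "enhiben_50k_hf/ConvertedTokenizer"
    ),
    tasks=[
        "winogrande", "sst2", "mrpc", "arc_easy", "copa", "piqa", "boolq",
        "indic_arc_easy_hi", "indic_arc_challenge_hi", "indic_boolq_hi",
    ],
    batch_size="auto",  # the harness resolved this to 64 for this run
)
print(results["results"])
```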
venv/lib/python3.10/site-packages/transformers/models/align/__pycache__/configuration_align.cpython-310.pyc ADDED
Binary file (16.1 kB).

venv/lib/python3.10/site-packages/transformers/models/align/__pycache__/convert_align_tf_to_hf.cpython-310.pyc ADDED
Binary file (10.9 kB).

venv/lib/python3.10/site-packages/transformers/models/align/__pycache__/modeling_align.cpython-310.pyc ADDED
Binary file (50.4 kB).

venv/lib/python3.10/site-packages/transformers/models/align/__pycache__/processing_align.cpython-310.pyc ADDED
Binary file (5.68 kB).
venv/lib/python3.10/site-packages/transformers/models/align/configuration_align.py ADDED
@@ -0,0 +1,383 @@
+ # coding=utf-8
+ # Copyright 2023 The HuggingFace Inc. team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """ ALIGN model configuration"""
+
+ import os
+ from typing import TYPE_CHECKING, List, Union
+
+
+ if TYPE_CHECKING:
+     pass
+
+ from ...configuration_utils import PretrainedConfig
+ from ...utils import logging
+
+
+ logger = logging.get_logger(__name__)
+
+
+ from ..deprecated._archive_maps import ALIGN_PRETRAINED_CONFIG_ARCHIVE_MAP  # noqa: F401, E402
+
+
+ class AlignTextConfig(PretrainedConfig):
+     r"""
+     This is the configuration class to store the configuration of an [`AlignTextModel`]. It is used to instantiate an
+     ALIGN text encoder according to the specified arguments, defining the model architecture. Instantiating a
+     configuration with the defaults will yield a similar configuration to that of the text encoder of the ALIGN
+     [kakaobrain/align-base](https://huggingface.co/kakaobrain/align-base) architecture. The default values here are
+     copied from BERT.
+
+     Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+     documentation from [`PretrainedConfig`] for more information.
+
+     Args:
+         vocab_size (`int`, *optional*, defaults to 30522):
+             Vocabulary size of the Align Text model. Defines the number of different tokens that can be represented by
+             the `inputs_ids` passed when calling [`AlignTextModel`].
+         hidden_size (`int`, *optional*, defaults to 768):
+             Dimensionality of the encoder layers and the pooler layer.
+         num_hidden_layers (`int`, *optional*, defaults to 12):
+             Number of hidden layers in the Transformer encoder.
+         num_attention_heads (`int`, *optional*, defaults to 12):
+             Number of attention heads for each attention layer in the Transformer encoder.
+         intermediate_size (`int`, *optional*, defaults to 3072):
+             Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
+         hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
+             The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+             `"relu"`, `"silu"` and `"gelu_new"` are supported.
+         hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
+             The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+         attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
+             The dropout ratio for the attention probabilities.
+         max_position_embeddings (`int`, *optional*, defaults to 512):
+             The maximum sequence length that this model might ever be used with. Typically set this to something large
+             just in case (e.g., 512 or 1024 or 2048).
+         type_vocab_size (`int`, *optional*, defaults to 2):
+             The vocabulary size of the `token_type_ids` passed when calling [`AlignTextModel`].
+         initializer_range (`float`, *optional*, defaults to 0.02):
+             The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+         layer_norm_eps (`float`, *optional*, defaults to 1e-12):
+             The epsilon used by the layer normalization layers.
+         pad_token_id (`int`, *optional*, defaults to 0):
+             Padding token id.
+         position_embedding_type (`str`, *optional*, defaults to `"absolute"`):
+             Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For
+             positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to
+             [Self-Attention with Relative Position Representations (Shaw et al.)](https://arxiv.org/abs/1803.02155).
+             For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models
+             with Better Relative Position Embeddings (Huang et al.)](https://arxiv.org/abs/2009.13658).
+         use_cache (`bool`, *optional*, defaults to `True`):
+             Whether or not the model should return the last key/values attentions (not used by all models). Only
+             relevant if `config.is_decoder=True`.
+
+     Example:
+
+     ```python
+     >>> from transformers import AlignTextConfig, AlignTextModel
+
+     >>> # Initializing an AlignTextConfig with kakaobrain/align-base style configuration
+     >>> configuration = AlignTextConfig()
+
+     >>> # Initializing an AlignTextModel (with random weights) from the kakaobrain/align-base style configuration
+     >>> model = AlignTextModel(configuration)
+
+     >>> # Accessing the model configuration
+     >>> configuration = model.config
+     ```"""
+
+     model_type = "align_text_model"
+
+     def __init__(
+         self,
+         vocab_size=30522,
+         hidden_size=768,
+         num_hidden_layers=12,
+         num_attention_heads=12,
+         intermediate_size=3072,
+         hidden_act="gelu",
+         hidden_dropout_prob=0.1,
+         attention_probs_dropout_prob=0.1,
+         max_position_embeddings=512,
+         type_vocab_size=2,
+         initializer_range=0.02,
+         layer_norm_eps=1e-12,
+         pad_token_id=0,
+         position_embedding_type="absolute",
+         use_cache=True,
+         **kwargs,
+     ):
+         super().__init__(**kwargs)
+
+         self.vocab_size = vocab_size
+         self.hidden_size = hidden_size
+         self.num_hidden_layers = num_hidden_layers
+         self.num_attention_heads = num_attention_heads
+         self.hidden_act = hidden_act
+         self.intermediate_size = intermediate_size
+         self.hidden_dropout_prob = hidden_dropout_prob
+         self.attention_probs_dropout_prob = attention_probs_dropout_prob
+         self.max_position_embeddings = max_position_embeddings
+         self.type_vocab_size = type_vocab_size
+         self.initializer_range = initializer_range
+         self.layer_norm_eps = layer_norm_eps
+         self.position_embedding_type = position_embedding_type
+         self.use_cache = use_cache
+         self.pad_token_id = pad_token_id
+
+     @classmethod
+     def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
+         cls._set_token_in_kwargs(kwargs)
+
+         config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
+
+         # get the text config dict if we are loading from AlignConfig
+         if config_dict.get("model_type") == "align":
+             config_dict = config_dict["text_config"]
+
+         if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
+             logger.warning(
+                 f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
+                 f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
+             )
+
+         return cls.from_dict(config_dict, **kwargs)
+
+
+ class AlignVisionConfig(PretrainedConfig):
+     r"""
+     This is the configuration class to store the configuration of an [`AlignVisionModel`]. It is used to instantiate
+     an ALIGN vision encoder according to the specified arguments, defining the model architecture. Instantiating a
+     configuration with the defaults will yield a similar configuration to that of the vision encoder of the ALIGN
+     [kakaobrain/align-base](https://huggingface.co/kakaobrain/align-base) architecture. The default values are copied
+     from EfficientNet (efficientnet-b7).
+
+     Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+     documentation from [`PretrainedConfig`] for more information.
+
+     Args:
+         num_channels (`int`, *optional*, defaults to 3):
+             The number of input channels.
+         image_size (`int`, *optional*, defaults to 600):
+             The input image size.
+         width_coefficient (`float`, *optional*, defaults to 2.0):
+             Scaling coefficient for network width at each stage.
+         depth_coefficient (`float`, *optional*, defaults to 3.1):
+             Scaling coefficient for network depth at each stage.
+         depth_divisor (`int`, *optional*, defaults to 8):
+             A unit of network width.
+         kernel_sizes (`List[int]`, *optional*, defaults to `[3, 3, 5, 3, 5, 5, 3]`):
+             List of kernel sizes to be used in each block.
+         in_channels (`List[int]`, *optional*, defaults to `[32, 16, 24, 40, 80, 112, 192]`):
+             List of input channel sizes to be used in each block for convolutional layers.
+         out_channels (`List[int]`, *optional*, defaults to `[16, 24, 40, 80, 112, 192, 320]`):
+             List of output channel sizes to be used in each block for convolutional layers.
+         depthwise_padding (`List[int]`, *optional*, defaults to `[]`):
+             List of block indices with square padding.
+         strides (`List[int]`, *optional*, defaults to `[1, 2, 2, 2, 1, 2, 1]`):
+             List of stride sizes to be used in each block for convolutional layers.
+         num_block_repeats (`List[int]`, *optional*, defaults to `[1, 2, 2, 3, 3, 4, 1]`):
+             List of the number of times each block is to be repeated.
+         expand_ratios (`List[int]`, *optional*, defaults to `[1, 6, 6, 6, 6, 6, 6]`):
+             List of scaling coefficients for each block.
+         squeeze_expansion_ratio (`float`, *optional*, defaults to 0.25):
+             Squeeze expansion ratio.
+         hidden_act (`str` or `function`, *optional*, defaults to `"swish"`):
+             The non-linear activation function (function or string) in each block. If string, `"gelu"`, `"relu"`,
+             `"selu"`, `"gelu_new"`, `"silu"` and `"mish"` are supported.
+         hidden_dim (`int`, *optional*, defaults to 2560):
+             The hidden dimension of the layer before the classification head.
+         pooling_type (`str` or `function`, *optional*, defaults to `"mean"`):
+             Type of final pooling to be applied before the dense classification head. Available options are [`"mean"`,
+             `"max"`]
+         initializer_range (`float`, *optional*, defaults to 0.02):
+             The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+         batch_norm_eps (`float`, *optional*, defaults to 1e-3):
+             The epsilon used by the batch normalization layers.
+         batch_norm_momentum (`float`, *optional*, defaults to 0.99):
+             The momentum used by the batch normalization layers.
+         drop_connect_rate (`float`, *optional*, defaults to 0.2):
+             The drop rate for skip connections.
+
+     Example:
+
+     ```python
+     >>> from transformers import AlignVisionConfig, AlignVisionModel
+
+     >>> # Initializing an AlignVisionConfig with kakaobrain/align-base style configuration
+     >>> configuration = AlignVisionConfig()
+
+     >>> # Initializing an AlignVisionModel (with random weights) from the kakaobrain/align-base style configuration
+     >>> model = AlignVisionModel(configuration)
+
+     >>> # Accessing the model configuration
+     >>> configuration = model.config
+     ```"""
+
+     model_type = "align_vision_model"
+
+     def __init__(
+         self,
+         num_channels: int = 3,
+         image_size: int = 600,
+         width_coefficient: float = 2.0,
+         depth_coefficient: float = 3.1,
+         depth_divisor: int = 8,
+         kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
+         in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192],
+         out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320],
+         depthwise_padding: List[int] = [],
+         strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
+         num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
+         expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6],
+         squeeze_expansion_ratio: float = 0.25,
+         hidden_act: str = "swish",
+         hidden_dim: int = 2560,
+         pooling_type: str = "mean",
+         initializer_range: float = 0.02,
+         batch_norm_eps: float = 0.001,
+         batch_norm_momentum: float = 0.99,
+         drop_connect_rate: float = 0.2,
+         **kwargs,
+     ):
+         super().__init__(**kwargs)
+
+         self.num_channels = num_channels
+         self.image_size = image_size
+         self.width_coefficient = width_coefficient
+         self.depth_coefficient = depth_coefficient
+         self.depth_divisor = depth_divisor
+         self.kernel_sizes = kernel_sizes
+         self.in_channels = in_channels
+         self.out_channels = out_channels
+         self.depthwise_padding = depthwise_padding
+         self.strides = strides
+         self.num_block_repeats = num_block_repeats
+         self.expand_ratios = expand_ratios
+         self.squeeze_expansion_ratio = squeeze_expansion_ratio
+         self.hidden_act = hidden_act
+         self.hidden_dim = hidden_dim
+         self.pooling_type = pooling_type
+         self.initializer_range = initializer_range
+         self.batch_norm_eps = batch_norm_eps
+         self.batch_norm_momentum = batch_norm_momentum
+         self.drop_connect_rate = drop_connect_rate
+         self.num_hidden_layers = sum(num_block_repeats) * 4
+
+     @classmethod
+     def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
+         cls._set_token_in_kwargs(kwargs)
+
+         config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
+
+         # get the vision config dict if we are loading from AlignConfig
+         if config_dict.get("model_type") == "align":
+             config_dict = config_dict["vision_config"]
+
+         if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
+             logger.warning(
+                 f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
+                 f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
+             )
+
+         return cls.from_dict(config_dict, **kwargs)
+
+
+ class AlignConfig(PretrainedConfig):
+     r"""
+     [`AlignConfig`] is the configuration class to store the configuration of an [`AlignModel`]. It is used to
+     instantiate an ALIGN model according to the specified arguments, defining the text model and vision model configs.
+     Instantiating a configuration with the defaults will yield a similar configuration to that of the ALIGN
+     [kakaobrain/align-base](https://huggingface.co/kakaobrain/align-base) architecture.
+
+     Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+     documentation from [`PretrainedConfig`] for more information.
+
+     Args:
+         text_config (`dict`, *optional*):
+             Dictionary of configuration options used to initialize [`AlignTextConfig`].
+         vision_config (`dict`, *optional*):
+             Dictionary of configuration options used to initialize [`AlignVisionConfig`].
+         projection_dim (`int`, *optional*, defaults to 640):
+             Dimensionality of text and vision projection layers.
+         temperature_init_value (`float`, *optional*, defaults to 1.0):
+             The initial value of the *temperature* parameter. Default is used as per the original ALIGN
+             implementation.
+         initializer_range (`float`, *optional*, defaults to 0.02):
+             The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+         kwargs (*optional*):
+             Dictionary of keyword arguments.
+
+     Example:
+
+     ```python
+     >>> from transformers import AlignConfig, AlignModel
+
+     >>> # Initializing an AlignConfig with kakaobrain/align-base style configuration
+     >>> configuration = AlignConfig()
+
+     >>> # Initializing an AlignModel (with random weights) from the kakaobrain/align-base style configuration
+     >>> model = AlignModel(configuration)
+
+     >>> # Accessing the model configuration
+     >>> configuration = model.config
+
+     >>> # We can also initialize an AlignConfig from an AlignTextConfig and an AlignVisionConfig
+     >>> from transformers import AlignTextConfig, AlignVisionConfig
+
+     >>> # Initializing ALIGN Text and Vision configurations
+     >>> config_text = AlignTextConfig()
+     >>> config_vision = AlignVisionConfig()
+
+     >>> config = AlignConfig.from_text_vision_configs(config_text, config_vision)
+     ```"""
+
+     model_type = "align"
+
+     def __init__(
+         self,
+         text_config=None,
+         vision_config=None,
+         projection_dim=640,
+         temperature_init_value=1.0,
+         initializer_range=0.02,
+         **kwargs,
+     ):
+         super().__init__(**kwargs)
+
+         if text_config is None:
+             text_config = {}
+             logger.info("text_config is None. Initializing the AlignTextConfig with default values.")
+
+         if vision_config is None:
+             vision_config = {}
+             logger.info("vision_config is None. Initializing the AlignVisionConfig with default values.")
+
+         self.text_config = AlignTextConfig(**text_config)
+         self.vision_config = AlignVisionConfig(**vision_config)
+
+         self.projection_dim = projection_dim
+         self.temperature_init_value = temperature_init_value
+         self.initializer_range = initializer_range
+
+     @classmethod
+     def from_text_vision_configs(cls, text_config: AlignTextConfig, vision_config: AlignVisionConfig, **kwargs):
+         r"""
+         Instantiate an [`AlignConfig`] (or a derived class) from ALIGN text model configuration and ALIGN vision
+         model configuration.
+
+         Returns:
+             [`AlignConfig`]: An instance of a configuration object
+         """
+
+         return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)
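One non-obvious detail in `AlignVisionConfig.__init__` above: `num_hidden_layers` is derived rather than passed in, as `sum(num_block_repeats) * 4`. A quick check against the defaults shown in the diff, exercising only code that appears there:

```python
# For the EfficientNet-B7-style defaults, num_block_repeats is [1, 2, 2, 3, 3, 4, 1],
# so num_hidden_layers = 16 * 4 = 64.
from transformers import AlignVisionConfig

config = AlignVisionConfig()
assert sum(config.num_block_repeats) == 16
print(config.num_hidden_layers)  # 64
```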
venv/lib/python3.10/site-packages/transformers/models/align/modeling_align.py ADDED
@@ -0,0 +1,1633 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 The Google Research Team Authors and The HuggingFace Team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch ALIGN model."""
16
+
17
+ import math
18
+ from dataclasses import dataclass
19
+ from typing import Any, Optional, Tuple, Union
20
+
21
+ import torch
22
+ import torch.utils.checkpoint
23
+ from torch import nn
24
+
25
+ from ...activations import ACT2FN
26
+ from ...modeling_outputs import (
27
+ BaseModelOutputWithNoAttention,
28
+ BaseModelOutputWithPastAndCrossAttentions,
29
+ BaseModelOutputWithPoolingAndCrossAttentions,
30
+ BaseModelOutputWithPoolingAndNoAttention,
31
+ )
32
+ from ...modeling_utils import PreTrainedModel
33
+ from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
34
+ from ...utils import (
35
+ ModelOutput,
36
+ add_start_docstrings,
37
+ add_start_docstrings_to_model_forward,
38
+ logging,
39
+ replace_return_docstrings,
40
+ )
41
+ from .configuration_align import AlignConfig, AlignTextConfig, AlignVisionConfig
42
+
43
+
44
+ logger = logging.get_logger(__name__)
45
+
46
+ _CHECKPOINT_FOR_DOC = "kakaobrain/align-base"
47
+ _CONFIG_FOR_DOC = "AlignConfig"
48
+
49
+
50
+ from ..deprecated._archive_maps import ALIGN_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
51
+
52
+
53
+ ALIGN_START_DOCSTRING = r"""
54
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
55
+ library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
56
+ etc.)
57
+
58
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
59
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
60
+ and behavior.
61
+
62
+ Parameters:
63
+ config ([`AlignConfig`]): Model configuration class with all the parameters of the model.
64
+ Initializing with a config file does not load the weights associated with the model, only the
65
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
66
+ """
67
+
68
ALIGN_TEXT_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
            it.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Indices of positions of each input sequence token in the position embeddings. Selected in the range `[0,
            config.max_position_embeddings - 1]`.

            [What are position IDs?](../glossary#position-ids)
        token_type_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
            1]`:

            - 0 corresponds to a *sentence A* token,
            - 1 corresponds to a *sentence B* token.

            [What are token type IDs?](../glossary#token-type-ids)
        head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
            Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
            is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
            model's internal embedding lookup matrix.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""

ALIGN_VISION_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
            [`AutoImageProcessor`]. See [`EfficientNetImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""

ALIGN_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
            it.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Indices of positions of each input sequence token in the position embeddings. Selected in the range `[0,
            config.max_position_embeddings - 1]`.

            [What are position IDs?](../glossary#position-ids)
        token_type_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
            1]`:

            - 0 corresponds to a *sentence A* token,
            - 1 corresponds to a *sentence B* token.

            [What are token type IDs?](../glossary#token-type-ids)
        head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
            Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
            is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
            model's internal embedding lookup matrix.
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
            [`AutoImageProcessor`]. See [`EfficientNetImageProcessor.__call__`] for details.
        return_loss (`bool`, *optional*):
            Whether or not to return the contrastive loss.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


@dataclass
class AlignVisionModelOutput(ModelOutput):
    """
    Base class for vision model outputs that also contain image embeddings obtained by pooling the last hidden states.

    Args:
        image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when model is initialized with `with_projection=True`):
            The image embeddings obtained by applying the projection layer to the pooler_output.
        last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
            Sequence of hidden-states at the output of the last layer of the model.
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
            one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
    """

    image_embeds: Optional[torch.FloatTensor] = None
    last_hidden_state: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None


@dataclass
class AlignTextModelOutput(ModelOutput):
    """
    Base class for text model outputs that also contain a pooling of the last hidden states.

    Args:
        text_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when model is initialized with `with_projection=True`):
            The text embeddings obtained by applying the projection layer to the pooler_output.
        last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
            Sequence of hidden-states at the output of the last layer of the model.
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
            one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attention weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
    """

    text_embeds: Optional[torch.FloatTensor] = None
    last_hidden_state: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None


@dataclass
class AlignOutput(ModelOutput):
    """
    Args:
        loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`):
            Contrastive loss for image-text similarity.
        logits_per_image (`torch.FloatTensor` of shape `(image_batch_size, text_batch_size)`):
            The scaled dot product scores between `image_embeds` and `text_embeds`. This represents the image-text
            similarity scores.
        logits_per_text (`torch.FloatTensor` of shape `(text_batch_size, image_batch_size)`):
            The scaled dot product scores between `text_embeds` and `image_embeds`. This represents the text-image
            similarity scores.
        text_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`):
            The text embeddings obtained by applying the projection layer to the pooled output of [`AlignTextModel`].
        image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`):
            The output of [`AlignVisionModel`].
        text_model_output (`BaseModelOutputWithPoolingAndCrossAttentions`):
            The output of the [`AlignTextModel`].
        vision_model_output (`BaseModelOutputWithPoolingAndNoAttention`):
            The output of the [`AlignVisionModel`].
    """

    loss: Optional[torch.FloatTensor] = None
    logits_per_image: torch.FloatTensor = None
    logits_per_text: torch.FloatTensor = None
    text_embeds: torch.FloatTensor = None
    image_embeds: torch.FloatTensor = None
    text_model_output: BaseModelOutputWithPoolingAndCrossAttentions = None
    vision_model_output: BaseModelOutputWithPoolingAndNoAttention = None

    def to_tuple(self) -> Tuple[Any]:
        return tuple(
            self[k] if k not in ["text_model_output", "vision_model_output"] else getattr(self, k).to_tuple()
            for k in self.keys()
        )


# contrastive loss function, adapted from
# https://sachinruk.github.io/blog/pytorch/pytorch%20lightning/loss%20function/gpu/2021/03/07/CLIP.html
def contrastive_loss(logits: torch.Tensor) -> torch.Tensor:
    return nn.functional.cross_entropy(logits, torch.arange(len(logits), device=logits.device), label_smoothing=0.1)


def align_loss(similarity: torch.Tensor) -> torch.Tensor:
    caption_loss = contrastive_loss(similarity)
    image_loss = contrastive_loss(similarity.t())
    return (caption_loss + image_loss) / 2.0


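# Editor's note (illustrative sketch, not part of the original file): `align_loss` is a
# symmetric, label-smoothed InfoNCE objective in which the i-th text and i-th image form
# the only positive pair, so the targets are simply the row indices 0..N-1. For example:
#
#   >>> sim = torch.eye(3) * 5.0  # a diagonal-dominant similarity matrix
#   >>> loss = align_loss(sim)  # both directions agree; roughly 0.35 due to label smoothing
#
# A random similarity matrix instead yields a loss near log(N), the chance-level value.
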
# Copied from transformers.models.efficientnet.modeling_efficientnet.round_filters with EfficientNet->AlignVision
def round_filters(config: AlignVisionConfig, num_channels: int):
    r"""
    Round the number of filters based on the width coefficient.
    """
    divisor = config.depth_divisor
    num_channels *= config.width_coefficient
    new_dim = max(divisor, int(num_channels + divisor / 2) // divisor * divisor)

    # Make sure that rounding down does not reduce the number of channels by more than 10%.
    if new_dim < 0.9 * num_channels:
        new_dim += divisor

    return int(new_dim)


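# Editor's note (illustrative sketch, not part of the original file): assuming
# width_coefficient=1.1 and depth_divisor=8, round_filters(config, 32) computes
# 32 * 1.1 = 35.2, rounds to the nearest multiple of 8 via
# max(8, int(35.2 + 4) // 8 * 8) = 32, and since 32 >= 0.9 * 35.2 the result stays 32.
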
# Copied from transformers.models.efficientnet.modeling_efficientnet.correct_pad
def correct_pad(kernel_size: Union[int, Tuple], adjust: bool = True):
    r"""
    Utility function to get the tuple padding value for the depthwise convolution.

    Args:
        kernel_size (`int` or `tuple`):
            Kernel size of the convolution layers.
        adjust (`bool`, *optional*, defaults to `True`):
            Whether to pad one pixel less on the left and top sides, so that the extra padding is applied to the
            right and bottom sides of the input.
    """
    if isinstance(kernel_size, int):
        kernel_size = (kernel_size, kernel_size)

    correct = (kernel_size[0] // 2, kernel_size[1] // 2)
    if adjust:
        return (correct[1] - 1, correct[1], correct[0] - 1, correct[0])
    else:
        return (correct[1], correct[1], correct[0], correct[0])


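# Editor's note (illustrative sketch, not part of the original file): for a 5x5 kernel,
# correct = (2, 2), so correct_pad(5) returns (1, 2, 1, 2) in nn.ZeroPad2d's
# (left, right, top, bottom) order, i.e. one pixel less on the left/top. This reproduces
# TensorFlow-style "same" padding for stride-2 convolutions, which pads more on the
# bottom/right; correct_pad(5, adjust=False) returns the symmetric (2, 2, 2, 2).
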
# Copied from transformers.models.efficientnet.modeling_efficientnet.EfficientNetEmbeddings with EfficientNet->AlignVision
class AlignVisionEmbeddings(nn.Module):
    r"""
    A module that corresponds to the stem module of the original work.
    """

    def __init__(self, config: AlignVisionConfig):
        super().__init__()

        self.out_dim = round_filters(config, 32)
        self.padding = nn.ZeroPad2d(padding=(0, 1, 0, 1))
        self.convolution = nn.Conv2d(
            config.num_channels, self.out_dim, kernel_size=3, stride=2, padding="valid", bias=False
        )
        self.batchnorm = nn.BatchNorm2d(self.out_dim, eps=config.batch_norm_eps, momentum=config.batch_norm_momentum)
        self.activation = ACT2FN[config.hidden_act]

    def forward(self, pixel_values: torch.Tensor) -> torch.Tensor:
        features = self.padding(pixel_values)
        features = self.convolution(features)
        features = self.batchnorm(features)
        features = self.activation(features)

        return features


# Copied from transformers.models.efficientnet.modeling_efficientnet.EfficientNetDepthwiseConv2d with EfficientNet->AlignVision
class AlignVisionDepthwiseConv2d(nn.Conv2d):
    def __init__(
        self,
        in_channels,
        depth_multiplier=1,
        kernel_size=3,
        stride=1,
        padding=0,
        dilation=1,
        bias=True,
        padding_mode="zeros",
    ):
        out_channels = in_channels * depth_multiplier
        super().__init__(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            groups=in_channels,
            bias=bias,
            padding_mode=padding_mode,
        )


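# Editor's note (illustrative sketch, not part of the original file): groups=in_channels
# gives each input channel its own depth_multiplier filters, so the parameter count scales
# with the number of channels rather than with channels**2:
#
#   >>> conv = AlignVisionDepthwiseConv2d(in_channels=8, kernel_size=3, padding=1)
#   >>> conv(torch.randn(1, 8, 16, 16)).shape
#   torch.Size([1, 8, 16, 16])
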
# Copied from transformers.models.efficientnet.modeling_efficientnet.EfficientNetExpansionLayer with EfficientNet->AlignVision
class AlignVisionExpansionLayer(nn.Module):
    r"""
    This corresponds to the expansion phase of each block in the original implementation.
    """

    def __init__(self, config: AlignVisionConfig, in_dim: int, out_dim: int, stride: int):
        super().__init__()
        self.expand_conv = nn.Conv2d(
            in_channels=in_dim,
            out_channels=out_dim,
            kernel_size=1,
            padding="same",
            bias=False,
        )
        self.expand_bn = nn.BatchNorm2d(num_features=out_dim, eps=config.batch_norm_eps)
        self.expand_act = ACT2FN[config.hidden_act]

    def forward(self, hidden_states: torch.FloatTensor) -> torch.Tensor:
        # Expand phase
        hidden_states = self.expand_conv(hidden_states)
        hidden_states = self.expand_bn(hidden_states)
        hidden_states = self.expand_act(hidden_states)

        return hidden_states


# Copied from transformers.models.efficientnet.modeling_efficientnet.EfficientNetDepthwiseLayer with EfficientNet->AlignVision
class AlignVisionDepthwiseLayer(nn.Module):
    r"""
    This corresponds to the depthwise convolution phase of each block in the original implementation.
    """

    def __init__(
        self,
        config: AlignVisionConfig,
        in_dim: int,
        stride: int,
        kernel_size: int,
        adjust_padding: bool,
    ):
        super().__init__()
        self.stride = stride
        conv_pad = "valid" if self.stride == 2 else "same"
        padding = correct_pad(kernel_size, adjust=adjust_padding)

        self.depthwise_conv_pad = nn.ZeroPad2d(padding=padding)
        self.depthwise_conv = AlignVisionDepthwiseConv2d(
            in_dim, kernel_size=kernel_size, stride=stride, padding=conv_pad, bias=False
        )
        self.depthwise_norm = nn.BatchNorm2d(
            num_features=in_dim, eps=config.batch_norm_eps, momentum=config.batch_norm_momentum
        )
        self.depthwise_act = ACT2FN[config.hidden_act]

    def forward(self, hidden_states: torch.FloatTensor) -> torch.Tensor:
        # Depthwise convolution
        if self.stride == 2:
            hidden_states = self.depthwise_conv_pad(hidden_states)

        hidden_states = self.depthwise_conv(hidden_states)
        hidden_states = self.depthwise_norm(hidden_states)
        hidden_states = self.depthwise_act(hidden_states)

        return hidden_states


# Copied from transformers.models.efficientnet.modeling_efficientnet.EfficientNetSqueezeExciteLayer with EfficientNet->AlignVision
class AlignVisionSqueezeExciteLayer(nn.Module):
    r"""
    This corresponds to the Squeeze and Excitation phase of each block in the original implementation.
    """

    def __init__(self, config: AlignVisionConfig, in_dim: int, expand_dim: int, expand: bool = False):
        super().__init__()
        self.dim = expand_dim if expand else in_dim
        self.dim_se = max(1, int(in_dim * config.squeeze_expansion_ratio))

        self.squeeze = nn.AdaptiveAvgPool2d(output_size=1)
        self.reduce = nn.Conv2d(
            in_channels=self.dim,
            out_channels=self.dim_se,
            kernel_size=1,
            padding="same",
        )
        self.expand = nn.Conv2d(
            in_channels=self.dim_se,
            out_channels=self.dim,
            kernel_size=1,
            padding="same",
        )
        self.act_reduce = ACT2FN[config.hidden_act]
        self.act_expand = nn.Sigmoid()

    def forward(self, hidden_states: torch.FloatTensor) -> torch.Tensor:
        inputs = hidden_states
        hidden_states = self.squeeze(hidden_states)
        hidden_states = self.reduce(hidden_states)
        hidden_states = self.act_reduce(hidden_states)

        hidden_states = self.expand(hidden_states)
        hidden_states = self.act_expand(hidden_states)
        hidden_states = torch.mul(inputs, hidden_states)

        return hidden_states


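# Editor's note (illustrative sketch, not part of the original file): squeeze-and-excitation
# averages each channel to a single scalar, passes it through a bottleneck
# (dim -> dim_se -> dim), and uses the sigmoid output as a per-channel gate. Given some
# AlignVisionConfig `config`, the layer preserves the input shape:
#
#   >>> se = AlignVisionSqueezeExciteLayer(config, in_dim=32, expand_dim=192, expand=True)
#   >>> se(torch.randn(1, 192, 14, 14)).shape
#   torch.Size([1, 192, 14, 14])
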
class AlignVisionFinalBlockLayer(nn.Module):
    r"""
    This corresponds to the final phase of each block in the original implementation.
    """

    def __init__(
        self, config: AlignVisionConfig, in_dim: int, out_dim: int, stride: int, drop_rate: float, id_skip: bool
    ):
        super().__init__()
        self.apply_dropout = stride == 1 and not id_skip
        self.project_conv = nn.Conv2d(
            in_channels=in_dim,
            out_channels=out_dim,
            kernel_size=1,
            padding="same",
            bias=False,
        )
        self.project_bn = nn.BatchNorm2d(
            num_features=out_dim, eps=config.batch_norm_eps, momentum=config.batch_norm_momentum
        )
        self.dropout = nn.Dropout(p=drop_rate)

    def forward(self, embeddings: torch.FloatTensor, hidden_states: torch.FloatTensor) -> torch.Tensor:
        hidden_states = self.project_conv(hidden_states)
        hidden_states = self.project_bn(hidden_states)

        if self.apply_dropout:
            hidden_states = self.dropout(hidden_states)
            hidden_states = hidden_states + embeddings

        return hidden_states


class AlignVisionBlock(nn.Module):
    r"""
    This corresponds to the block module of the original EfficientNet vision encoder implementation.

    Args:
        config ([`AlignVisionConfig`]):
            Model configuration class.
        in_dim (`int`):
            Number of input channels.
        out_dim (`int`):
            Number of output channels.
        stride (`int`):
            Stride size to be used in convolution layers.
        expand_ratio (`int`):
            Expand ratio to set the output dimensions for the expansion and squeeze-excite layers.
        kernel_size (`int`):
            Kernel size for the depthwise convolution layer.
        drop_rate (`float`):
            Dropout rate to be used in the final phase of each block.
        id_skip (`bool`):
            Whether to apply dropout and sum the final hidden states with the input embeddings during the final phase
            of each block. Set to `True` for the first block of each stage.
        adjust_padding (`bool`):
            Whether to apply padding to only the right and bottom sides of the input kernel before the depthwise
            convolution operation; set to `True` for inputs with odd input sizes.
    """

    def __init__(
        self,
        config: AlignVisionConfig,
        in_dim: int,
        out_dim: int,
        stride: int,
        expand_ratio: int,
        kernel_size: int,
        drop_rate: float,
        id_skip: bool,
        adjust_padding: bool,
    ):
        super().__init__()
        self.expand_ratio = expand_ratio
        self.expand = self.expand_ratio != 1
        expand_in_dim = in_dim * expand_ratio

        if self.expand:
            self.expansion = AlignVisionExpansionLayer(
                config=config, in_dim=in_dim, out_dim=expand_in_dim, stride=stride
            )

        self.depthwise_conv = AlignVisionDepthwiseLayer(
            config=config,
            in_dim=expand_in_dim if self.expand else in_dim,
            stride=stride,
            kernel_size=kernel_size,
            adjust_padding=adjust_padding,
        )
        self.squeeze_excite = AlignVisionSqueezeExciteLayer(
            config=config, in_dim=in_dim, expand_dim=expand_in_dim, expand=self.expand
        )
        self.projection = AlignVisionFinalBlockLayer(
            config=config,
            in_dim=expand_in_dim if self.expand else in_dim,
            out_dim=out_dim,
            stride=stride,
            drop_rate=drop_rate,
            id_skip=id_skip,
        )

    def forward(self, hidden_states: torch.FloatTensor) -> torch.Tensor:
        embeddings = hidden_states
        # Expansion and depthwise convolution phase
        if self.expand_ratio != 1:
            hidden_states = self.expansion(hidden_states)
        hidden_states = self.depthwise_conv(hidden_states)

        # Squeeze and excite phase
        hidden_states = self.squeeze_excite(hidden_states)
        hidden_states = self.projection(embeddings, hidden_states)
        return hidden_states


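# Editor's note: AlignVisionBlock above is a standard MBConv block. When expand_ratio == 1
# the expansion convolution is skipped and the depthwise convolution runs directly on the
# input. The residual connection (inside AlignVisionFinalBlockLayer) only fires when
# stride == 1 and id_skip is False, i.e. for the repeated blocks of a stage where
# in_dim == out_dim, so the shapes are guaranteed to match.
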
class AlignVisionEncoder(nn.Module):
    r"""
    Forward propagates the embeddings through each vision encoder (EfficientNet) block.

    Args:
        config ([`AlignVisionConfig`]):
            Model configuration class.
    """

    def __init__(self, config: AlignVisionConfig):
        super().__init__()
        self.depth_coefficient = config.depth_coefficient

        def round_repeats(repeats):
            # Round the number of block repeats based on the depth multiplier.
            return int(math.ceil(self.depth_coefficient * repeats))

        num_base_blocks = len(config.in_channels)
        num_blocks = sum(round_repeats(n) for n in config.num_block_repeats)

        curr_block_num = 0
        blocks = []
        for i in range(num_base_blocks):
            in_dim = round_filters(config, config.in_channels[i])
            out_dim = round_filters(config, config.out_channels[i])
            stride = config.strides[i]
            kernel_size = config.kernel_sizes[i]
            expand_ratio = config.expand_ratios[i]

            for j in range(round_repeats(config.num_block_repeats[i])):
                id_skip = j == 0
                stride = 1 if j > 0 else stride
                in_dim = out_dim if j > 0 else in_dim
                adjust_padding = curr_block_num not in config.depthwise_padding
                drop_rate = config.drop_connect_rate * curr_block_num / num_blocks

                block = AlignVisionBlock(
                    config=config,
                    in_dim=in_dim,
                    out_dim=out_dim,
                    stride=stride,
                    kernel_size=kernel_size,
                    expand_ratio=expand_ratio,
                    drop_rate=drop_rate,
                    id_skip=id_skip,
                    adjust_padding=adjust_padding,
                )
                blocks.append(block)
                curr_block_num += 1

        self.blocks = nn.ModuleList(blocks)

    def forward(
        self,
        hidden_states: torch.FloatTensor,
        output_hidden_states: Optional[bool] = False,
        return_dict: Optional[bool] = True,
    ) -> BaseModelOutputWithNoAttention:
        all_hidden_states = (hidden_states,) if output_hidden_states else None

        for block in self.blocks:
            hidden_states = block(hidden_states)
            if output_hidden_states:
                all_hidden_states += (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(
            last_hidden_state=hidden_states,
            hidden_states=all_hidden_states,
        )


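# Editor's note (illustrative sketch, not part of the original file): assuming the
# EfficientNet-B7-style depth_coefficient=3.1 used by "kakaobrain/align-base", a stage
# declared with 4 repeats runs round_repeats(4) = ceil(3.1 * 4) = 13 blocks, and
# drop_rate = drop_connect_rate * curr_block_num / num_blocks grows linearly so that
# deeper blocks are regularized more aggressively.
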
# Copied from transformers.models.bert.modeling_bert.BertEmbeddings with Bert->AlignText
class AlignTextEmbeddings(nn.Module):
    """Construct the embeddings from word, position and token_type embeddings."""

    def __init__(self, config):
        super().__init__()
        self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
        self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)

        # self.LayerNorm is not snake-cased to stick with the TensorFlow model variable name and be able to load
        # any TensorFlow checkpoint file
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        # position_ids (1, len position emb) is contiguous in memory and exported when serialized
        self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
        self.register_buffer(
            "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
        )
        self.register_buffer(
            "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False
        )

    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        past_key_values_length: int = 0,
    ) -> torch.Tensor:
        if input_ids is not None:
            input_shape = input_ids.size()
        else:
            input_shape = inputs_embeds.size()[:-1]

        seq_length = input_shape[1]

        if position_ids is None:
            position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length]

        # When token_type_ids is not passed, default to the all-zeros buffer registered in the constructor. The
        # registered buffer helps users trace the model without passing token_type_ids and solves issue #5664.
        if token_type_ids is None:
            if hasattr(self, "token_type_ids"):
                buffered_token_type_ids = self.token_type_ids[:, :seq_length]
                buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length)
                token_type_ids = buffered_token_type_ids_expanded
            else:
                token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)

        if inputs_embeds is None:
            inputs_embeds = self.word_embeddings(input_ids)
        token_type_embeddings = self.token_type_embeddings(token_type_ids)

        embeddings = inputs_embeds + token_type_embeddings
        if self.position_embedding_type == "absolute":
            position_embeddings = self.position_embeddings(position_ids)
            embeddings += position_embeddings
        embeddings = self.LayerNorm(embeddings)
        embeddings = self.dropout(embeddings)
        return embeddings


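# Editor's note (illustrative sketch, not part of the original file): AlignTextEmbeddings
# sums three lookups element-wise before LayerNorm and dropout, so a (batch_size,
# seq_length) LongTensor of token ids maps to a (batch_size, seq_length, hidden_size)
# float tensor: embeddings = word + token_type (+ position, when the embedding type is
# "absolute").
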
# Copied from transformers.models.bert.modeling_bert.BertSelfAttention with Bert->AlignText
class AlignTextSelfAttention(nn.Module):
    def __init__(self, config, position_embedding_type=None):
        super().__init__()
        if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
            raise ValueError(
                f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
                f"heads ({config.num_attention_heads})"
            )

        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size

        self.query = nn.Linear(config.hidden_size, self.all_head_size)
        self.key = nn.Linear(config.hidden_size, self.all_head_size)
        self.value = nn.Linear(config.hidden_size, self.all_head_size)

        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
        self.position_embedding_type = position_embedding_type or getattr(
            config, "position_embedding_type", "absolute"
        )
        if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
            self.max_position_embeddings = config.max_position_embeddings
            self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)

        self.is_decoder = config.is_decoder

    def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor:
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.view(new_x_shape)
        return x.permute(0, 2, 1, 3)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.Tensor]:
        mixed_query_layer = self.query(hidden_states)

        # If this is instantiated as a cross-attention module, the keys
        # and values come from an encoder; the attention mask needs to be
        # such that the encoder's padding tokens are not attended to.
        is_cross_attention = encoder_hidden_states is not None

        if is_cross_attention and past_key_value is not None:
            # reuse k, v, cross_attentions
            key_layer = past_key_value[0]
            value_layer = past_key_value[1]
            attention_mask = encoder_attention_mask
        elif is_cross_attention:
            key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
            value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
            attention_mask = encoder_attention_mask
        elif past_key_value is not None:
            key_layer = self.transpose_for_scores(self.key(hidden_states))
            value_layer = self.transpose_for_scores(self.value(hidden_states))
            key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
            value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
        else:
            key_layer = self.transpose_for_scores(self.key(hidden_states))
            value_layer = self.transpose_for_scores(self.value(hidden_states))

        query_layer = self.transpose_for_scores(mixed_query_layer)

        use_cache = past_key_value is not None
        if self.is_decoder:
            # if cross_attention, save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
            # Further calls to the cross_attention layer can then reuse all cross-attention
            # key/value_states (first "if" case)
            # if uni-directional self-attention (decoder), save Tuple(torch.Tensor, torch.Tensor) of
            # all previous decoder key/value_states. Further calls to uni-directional self-attention
            # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
            # if encoder bi-directional self-attention, `past_key_value` is always `None`
            past_key_value = (key_layer, value_layer)

        # Take the dot product between "query" and "key" to get the raw attention scores.
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))

        if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
            query_length, key_length = query_layer.shape[2], key_layer.shape[2]
            if use_cache:
                position_ids_l = torch.tensor(key_length - 1, dtype=torch.long, device=hidden_states.device).view(
                    -1, 1
                )
            else:
                position_ids_l = torch.arange(query_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
            position_ids_r = torch.arange(key_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
            distance = position_ids_l - position_ids_r

            positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
            positional_embedding = positional_embedding.to(dtype=query_layer.dtype)  # fp16 compatibility

            if self.position_embedding_type == "relative_key":
                relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
                attention_scores = attention_scores + relative_position_scores
            elif self.position_embedding_type == "relative_key_query":
                relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
                relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
                attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key

        attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        if attention_mask is not None:
            # Apply the attention mask (precomputed for all layers in the AlignTextModel forward() function)
            attention_scores = attention_scores + attention_mask

        # Normalize the attention scores to probabilities.
        attention_probs = nn.functional.softmax(attention_scores, dim=-1)

        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs = self.dropout(attention_probs)

        # Mask heads if we want to
        if head_mask is not None:
            attention_probs = attention_probs * head_mask

        context_layer = torch.matmul(attention_probs, value_layer)

        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(new_context_layer_shape)

        outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)

        if self.is_decoder:
            outputs = outputs + (past_key_value,)
        return outputs


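# Editor's note (illustrative sketch, not part of the original file): transpose_for_scores
# turns (batch, seq, all_head_size) into (batch, num_heads, seq, head_size) so a single
# batched matmul scores every head at once, e.g. with 8 heads of size 8:
#
#   >>> x = torch.randn(2, 7, 64)
#   >>> x.view(2, 7, 8, 8).permute(0, 2, 1, 3).shape
#   torch.Size([2, 8, 7, 8])
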
# Copied from transformers.models.bert.modeling_bert.BertSelfOutput with Bert->AlignText
class AlignTextSelfOutput(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states


# Copied from transformers.models.bert.modeling_bert.BertAttention with Bert->AlignText
class AlignTextAttention(nn.Module):
    def __init__(self, config, position_embedding_type=None):
        super().__init__()
        self.self = AlignTextSelfAttention(config, position_embedding_type=position_embedding_type)
        self.output = AlignTextSelfOutput(config)
        self.pruned_heads = set()

    def prune_heads(self, heads):
        if len(heads) == 0:
            return
        heads, index = find_pruneable_heads_and_indices(
            heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
        )

        # Prune linear layers
        self.self.query = prune_linear_layer(self.self.query, index)
        self.self.key = prune_linear_layer(self.self.key, index)
        self.self.value = prune_linear_layer(self.self.value, index)
        self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)

        # Update hyper params and store pruned heads
        self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
        self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
        self.pruned_heads = self.pruned_heads.union(heads)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.Tensor]:
        self_outputs = self.self(
            hidden_states,
            attention_mask,
            head_mask,
            encoder_hidden_states,
            encoder_attention_mask,
            past_key_value,
            output_attentions,
        )
        attention_output = self.output(self_outputs[0], hidden_states)
        outputs = (attention_output,) + self_outputs[1:]  # add attentions if we output them
        return outputs


# Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->AlignText
class AlignTextIntermediate(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.intermediate_act_fn(hidden_states)
        return hidden_states


# Copied from transformers.models.bert.modeling_bert.BertOutput with Bert->AlignText
class AlignTextOutput(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states


# Copied from transformers.models.bert.modeling_bert.BertLayer with Bert->AlignText
class AlignTextLayer(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.chunk_size_feed_forward = config.chunk_size_feed_forward
        self.seq_len_dim = 1
        self.attention = AlignTextAttention(config)
        self.is_decoder = config.is_decoder
        self.add_cross_attention = config.add_cross_attention
        if self.add_cross_attention:
            if not self.is_decoder:
                raise ValueError(f"{self} should be used as a decoder model if cross attention is added")
            self.crossattention = AlignTextAttention(config, position_embedding_type="absolute")
        self.intermediate = AlignTextIntermediate(config)
        self.output = AlignTextOutput(config)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.Tensor]:
        # decoder uni-directional self-attention cached key/values tuple is at positions 1, 2
        self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
        self_attention_outputs = self.attention(
            hidden_states,
            attention_mask,
            head_mask,
            output_attentions=output_attentions,
            past_key_value=self_attn_past_key_value,
        )
        attention_output = self_attention_outputs[0]

        # if decoder, the last output is a tuple of self-attn cache
        if self.is_decoder:
            outputs = self_attention_outputs[1:-1]
            present_key_value = self_attention_outputs[-1]
        else:
            outputs = self_attention_outputs[1:]  # add self attentions if we output attention weights

        cross_attn_present_key_value = None
        if self.is_decoder and encoder_hidden_states is not None:
            if not hasattr(self, "crossattention"):
                raise ValueError(
                    f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers"
                    " by setting `config.add_cross_attention=True`"
                )

            # cross_attn cached key/values tuple is at positions 3, 4 of the past_key_value tuple
            cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
            cross_attention_outputs = self.crossattention(
                attention_output,
                attention_mask,
                head_mask,
                encoder_hidden_states,
                encoder_attention_mask,
                cross_attn_past_key_value,
                output_attentions,
            )
            attention_output = cross_attention_outputs[0]
            outputs = outputs + cross_attention_outputs[1:-1]  # add cross attentions if we output attention weights

            # add cross-attn cache to positions 3, 4 of the present_key_value tuple
            cross_attn_present_key_value = cross_attention_outputs[-1]
            present_key_value = present_key_value + cross_attn_present_key_value

        layer_output = apply_chunking_to_forward(
            self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
        )
        outputs = (layer_output,) + outputs

        # if decoder, return the attn key/values as the last output
        if self.is_decoder:
            outputs = outputs + (present_key_value,)

        return outputs

    def feed_forward_chunk(self, attention_output):
        intermediate_output = self.intermediate(attention_output)
        layer_output = self.output(intermediate_output, attention_output)
        return layer_output


# Copied from transformers.models.bert.modeling_bert.BertEncoder with Bert->AlignText
class AlignTextEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.layer = nn.ModuleList([AlignTextLayer(config) for _ in range(config.num_hidden_layers)])
        self.gradient_checkpointing = False

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = False,
        output_hidden_states: Optional[bool] = False,
        return_dict: Optional[bool] = True,
    ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]:
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None
        all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None

        if self.gradient_checkpointing and self.training:
            if use_cache:
                logger.warning_once(
                    "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
                )
                use_cache = False

        next_decoder_cache = () if use_cache else None
        for i, layer_module in enumerate(self.layer):
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            layer_head_mask = head_mask[i] if head_mask is not None else None
            past_key_value = past_key_values[i] if past_key_values is not None else None

            if self.gradient_checkpointing and self.training:
                layer_outputs = self._gradient_checkpointing_func(
                    layer_module.__call__,
                    hidden_states,
                    attention_mask,
                    layer_head_mask,
                    encoder_hidden_states,
                    encoder_attention_mask,
                    past_key_value,
                    output_attentions,
                )
            else:
                layer_outputs = layer_module(
                    hidden_states,
                    attention_mask,
                    layer_head_mask,
                    encoder_hidden_states,
                    encoder_attention_mask,
                    past_key_value,
                    output_attentions,
                )

            hidden_states = layer_outputs[0]
            if use_cache:
                next_decoder_cache += (layer_outputs[-1],)
            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)
                if self.config.add_cross_attention:
                    all_cross_attentions = all_cross_attentions + (layer_outputs[2],)

        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(
                v
                for v in [
                    hidden_states,
                    next_decoder_cache,
                    all_hidden_states,
                    all_self_attentions,
                    all_cross_attentions,
                ]
                if v is not None
            )
        return BaseModelOutputWithPastAndCrossAttentions(
            last_hidden_state=hidden_states,
            past_key_values=next_decoder_cache,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
            cross_attentions=all_cross_attentions,
        )


# Copied from transformers.models.bert.modeling_bert.BertPooler with Bert -> AlignText
class AlignTextPooler(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.activation = nn.Tanh()

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        # We "pool" the model by simply taking the hidden state corresponding
        # to the first token.
        first_token_tensor = hidden_states[:, 0]
        pooled_output = self.dense(first_token_tensor)
        pooled_output = self.activation(pooled_output)
        return pooled_output


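# Editor's note: the pooler above simply takes the first ([CLS]) token's hidden state and
# applies dense + tanh. Note that AlignModel.get_text_features below bypasses this pooler
# and instead projects last_hidden_state[:, 0, :] directly through text_projection.
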
class AlignPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = AlignConfig
    base_model_prefix = "align"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        """Initialize the weights"""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, AlignModel):
            nn.init.xavier_uniform_(module.text_projection.weight)
            module.text_projection.bias.data.zero_()
            module.text_projection._is_hf_initialized = True
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        if isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)


@add_start_docstrings(
    """The text model from ALIGN without any head or projection on top.""",
    ALIGN_START_DOCSTRING,
)
class AlignTextModel(AlignPreTrainedModel):
    config_class = AlignTextConfig

    def __init__(self, config: AlignTextConfig, add_pooling_layer: bool = True):
        super().__init__(config)
        self.config = config

        self.embeddings = AlignTextEmbeddings(config)
        self.encoder = AlignTextEncoder(config)

        self.pooler = AlignTextPooler(config) if add_pooling_layer else None

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value

    @add_start_docstrings_to_model_forward(ALIGN_TEXT_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BaseModelOutputWithPoolingAndCrossAttentions, config_class=AlignTextConfig)
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithPoolingAndCrossAttentions]:
        r"""
        Returns:

        Examples:

        ```python
        >>> from transformers import AutoTokenizer, AlignTextModel

        >>> model = AlignTextModel.from_pretrained("kakaobrain/align-base")
        >>> tokenizer = AutoTokenizer.from_pretrained("kakaobrain/align-base")

        >>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt")

        >>> outputs = model(**inputs)
        >>> last_hidden_state = outputs.last_hidden_state
        >>> pooled_output = outputs.pooler_output  # pooled (first token) states
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        batch_size, seq_length = input_shape
        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones((batch_size, seq_length), device=device)

        if token_type_ids is None:
            if hasattr(self.embeddings, "token_type_ids"):
                buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]
                buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length)
                token_type_ids = buffered_token_type_ids_expanded
            else:
                token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape)

        # Prepare head mask if needed
        # 1.0 in head_mask indicates we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids,
            position_ids=position_ids,
            token_type_ids=token_type_ids,
            inputs_embeds=inputs_embeds,
        )
        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output) if self.pooler is not None else None

        if not return_dict:
            return (sequence_output, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndCrossAttentions(
            last_hidden_state=sequence_output,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
            cross_attentions=encoder_outputs.cross_attentions,
        )


@add_start_docstrings(
    """The vision model from ALIGN without any head or projection on top.""",
    ALIGN_START_DOCSTRING,
)
class AlignVisionModel(AlignPreTrainedModel):
    config_class = AlignVisionConfig
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False

    def __init__(self, config: AlignVisionConfig):
        super().__init__(config)
        self.config = config
        self.embeddings = AlignVisionEmbeddings(config)
        self.encoder = AlignVisionEncoder(config)

        # Final pooling layer
        if config.pooling_type == "mean":
            self.pooler = nn.AvgPool2d(config.hidden_dim, ceil_mode=True)
        elif config.pooling_type == "max":
            self.pooler = nn.MaxPool2d(config.hidden_dim, ceil_mode=True)
        else:
            raise ValueError(f"config.pooling_type must be one of ['mean', 'max'] got {config.pooling_type}")

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self) -> nn.Module:
        return self.embeddings.convolution

    @add_start_docstrings_to_model_forward(ALIGN_VISION_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BaseModelOutputWithPoolingAndNoAttention, config_class=AlignVisionConfig)
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithPoolingAndNoAttention]:
        r"""
        Returns:

        Examples:

        ```python
        >>> from PIL import Image
        >>> import requests
        >>> from transformers import AutoProcessor, AlignVisionModel

        >>> model = AlignVisionModel.from_pretrained("kakaobrain/align-base")
        >>> processor = AutoProcessor.from_pretrained("kakaobrain/align-base")

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> inputs = processor(images=image, return_tensors="pt")

        >>> outputs = model(**inputs)
        >>> last_hidden_state = outputs.last_hidden_state
        >>> pooled_output = outputs.pooler_output  # pooled states
        ```"""
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        embedding_output = self.embeddings(pixel_values)
        encoder_outputs = self.encoder(
            embedding_output,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        # Apply pooling
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)
        # Reshape (batch_size, projection_dim, 1, 1) -> (batch_size, projection_dim)
        pooled_output = pooled_output.reshape(pooled_output.shape[:2])

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )


+ @add_start_docstrings(ALIGN_START_DOCSTRING)
1409
+ class AlignModel(AlignPreTrainedModel):
1410
+ config_class = AlignConfig
1411
+
1412
+ def __init__(self, config: AlignConfig):
1413
+ super().__init__(config)
1414
+
1415
+ if not isinstance(config.text_config, AlignTextConfig):
1416
+ raise ValueError(
1417
+ "config.text_config is expected to be of type AlignTextConfig but is of type"
1418
+ f" {type(config.text_config)}."
1419
+ )
1420
+
1421
+ if not isinstance(config.vision_config, AlignVisionConfig):
1422
+ raise ValueError(
1423
+ "config.vision_config is expected to be of type AlignVisionConfig but is of type"
1424
+ f" {type(config.vision_config)}."
1425
+ )
1426
+
1427
+ text_config = config.text_config
1428
+ vision_config = config.vision_config
1429
+
1430
+ self.projection_dim = config.projection_dim
1431
+ self.text_embed_dim = text_config.hidden_size
1432
+
1433
+ self.text_model = AlignTextModel(text_config)
1434
+ self.vision_model = AlignVisionModel(vision_config)
1435
+
1436
+ self.text_projection = nn.Linear(self.text_embed_dim, self.projection_dim)
1437
+ self.temperature = nn.Parameter(torch.tensor(self.config.temperature_init_value))
1438
+
1439
+ # Initialize weights and apply final processing
1440
+ self.post_init()
1441
+
1442
+ @add_start_docstrings_to_model_forward(ALIGN_TEXT_INPUTS_DOCSTRING)
1443
+ def get_text_features(
1444
+ self,
1445
+ input_ids: Optional[torch.Tensor] = None,
1446
+ attention_mask: Optional[torch.Tensor] = None,
1447
+ token_type_ids: Optional[torch.Tensor] = None,
1448
+ position_ids: Optional[torch.Tensor] = None,
1449
+ head_mask: Optional[torch.Tensor] = None,
1450
+ inputs_embeds: Optional[torch.Tensor] = None,
1451
+ output_attentions: Optional[bool] = None,
1452
+ output_hidden_states: Optional[bool] = None,
1453
+ return_dict: Optional[bool] = None,
1454
+ ) -> torch.FloatTensor:
1455
+ r"""
1456
+ Returns:
1457
+ text_features (`torch.FloatTensor` of shape `(batch_size, output_dim)`): The text embeddings obtained by
1458
+ applying the projection layer to the pooled output of [`AlignTextModel`].
1459
+
1460
+ Examples:
1461
+
1462
+ ```python
1463
+ >>> from transformers import AutoTokenizer, AlignModel
1464
+
1465
+ >>> model = AlignModel.from_pretrained("kakaobrain/align-base")
1466
+ >>> tokenizer = AutoTokenizer.from_pretrained("kakaobrain/align-base")
1467
+
1468
+ >>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt")
1469
+ >>> text_features = model.get_text_features(**inputs)
1470
+ ```"""
1471
+ # Use ALIGN model's config for some fields (if specified) instead of those of vision & text components.
1472
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1473
+ output_hidden_states = (
1474
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1475
+ )
1476
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1477
+
1478
+ text_outputs = self.text_model(
1479
+ input_ids=input_ids,
1480
+ attention_mask=attention_mask,
1481
+ token_type_ids=token_type_ids,
1482
+ position_ids=position_ids,
1483
+ head_mask=head_mask,
1484
+ inputs_embeds=inputs_embeds,
1485
+ output_attentions=output_attentions,
1486
+ output_hidden_states=output_hidden_states,
1487
+ return_dict=return_dict,
1488
+ )
1489
+
1490
+ last_hidden_state = text_outputs[0][:, 0, :]
1491
+ text_features = self.text_projection(last_hidden_state)
1492
+
1493
+ return text_features
1494
+
1495
+ @add_start_docstrings_to_model_forward(ALIGN_VISION_INPUTS_DOCSTRING)
1496
+ def get_image_features(
1497
+ self,
1498
+ pixel_values: Optional[torch.FloatTensor] = None,
1499
+ output_hidden_states: Optional[bool] = None,
1500
+ return_dict: Optional[bool] = None,
1501
+ ) -> torch.FloatTensor:
1502
+ r"""
1503
+ Returns:
1504
+ image_features (`torch.FloatTensor` of shape `(batch_size, output_dim)`): The image embeddings obtained by
1505
+ applying the projection layer to the pooled output of [`AlignVisionModel`].
1506
+
1507
+ Examples:
1508
+
1509
+ ```python
1510
+ >>> from PIL import Image
1511
+ >>> import requests
1512
+ >>> from transformers import AutoProcessor, AlignModel
1513
+
1514
+ >>> model = AlignModel.from_pretrained("kakaobrain/align-base")
1515
+ >>> processor = AutoProcessor.from_pretrained("kakaobrain/align-base")
1516
+
1517
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
1518
+ >>> image = Image.open(requests.get(url, stream=True).raw)
1519
+
1520
+ >>> inputs = processor(images=image, return_tensors="pt")
1521
+
1522
+ >>> image_features = model.get_image_features(**inputs)
1523
+ ```"""
1524
+ # Use ALIGN model's config for some fields (if specified) instead of those of vision & text components.
1525
+ output_hidden_states = (
1526
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1527
+ )
1528
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1529
+
1530
+ vision_outputs = self.vision_model(
1531
+ pixel_values=pixel_values,
1532
+ output_hidden_states=output_hidden_states,
1533
+ return_dict=return_dict,
1534
+ )
1535
+
1536
+ image_features = vision_outputs[1] # pooled_output
1537
+
1538
+ return image_features
1539
+
1540
+ @add_start_docstrings_to_model_forward(ALIGN_INPUTS_DOCSTRING)
1541
+ @replace_return_docstrings(output_type=AlignOutput, config_class=AlignConfig)
1542
+ def forward(
1543
+ self,
1544
+ input_ids: Optional[torch.LongTensor] = None,
1545
+ pixel_values: Optional[torch.FloatTensor] = None,
1546
+ attention_mask: Optional[torch.Tensor] = None,
1547
+ token_type_ids: Optional[torch.Tensor] = None,
1548
+ position_ids: Optional[torch.Tensor] = None,
1549
+ head_mask: Optional[torch.Tensor] = None,
1550
+ inputs_embeds: Optional[torch.Tensor] = None,
1551
+ return_loss: Optional[bool] = None,
1552
+ output_attentions: Optional[bool] = None,
1553
+ output_hidden_states: Optional[bool] = None,
1554
+ return_dict: Optional[bool] = None,
1555
+ ) -> Union[Tuple, AlignOutput]:
1556
+ r"""
1557
+ Returns:
1558
+
1559
+ Examples:
1560
+
1561
+ ```python
1562
+ >>> from PIL import Image
1563
+ >>> import requests
1564
+ >>> from transformers import AutoProcessor, AlignModel
1565
+
1566
+ >>> model = AlignModel.from_pretrained("kakaobrain/align-base")
1567
+ >>> processor = AutoProcessor.from_pretrained("kakaobrain/align-base")
1568
+
1569
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
1570
+ >>> image = Image.open(requests.get(url, stream=True).raw)
1571
+
1572
+ >>> inputs = processor(
1573
+ ... text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="pt", padding=True
1574
+ ... )
1575
+
1576
+ >>> outputs = model(**inputs)
1577
+ >>> logits_per_image = outputs.logits_per_image # this is the image-text similarity score
1578
+ >>> probs = logits_per_image.softmax(dim=1) # we can take the softmax to get the label probabilities
1579
+ ```"""
1580
+ # Use ALIGN model's config for some fields (if specified) instead of those of vision & text components.
1581
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1582
+ output_hidden_states = (
1583
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1584
+ )
1585
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1586
+
1587
+ vision_outputs = self.vision_model(
1588
+ pixel_values=pixel_values,
1589
+ output_hidden_states=output_hidden_states,
1590
+ return_dict=return_dict,
1591
+ )
1592
+
1593
+ text_outputs = self.text_model(
1594
+ input_ids=input_ids,
1595
+ attention_mask=attention_mask,
1596
+ token_type_ids=token_type_ids,
1597
+ position_ids=position_ids,
1598
+ head_mask=head_mask,
1599
+ inputs_embeds=inputs_embeds,
1600
+ output_attentions=output_attentions,
1601
+ output_hidden_states=output_hidden_states,
1602
+ return_dict=return_dict,
1603
+ )
1604
+
1605
+ image_embeds = vision_outputs[1]
1606
+ text_embeds = text_outputs[0][:, 0, :]
1607
+ text_embeds = self.text_projection(text_embeds)
1608
+
1609
+ # normalized features
1610
+ image_embeds = image_embeds / image_embeds.norm(p=2, dim=-1, keepdim=True)
1611
+ text_embeds = text_embeds / text_embeds.norm(p=2, dim=-1, keepdim=True)
1612
+
1613
+ # cosine similarity as logits
1614
+ logits_per_text = torch.matmul(text_embeds, image_embeds.t()) / self.temperature
1615
+ logits_per_image = logits_per_text.t()
1616
+
1617
+ loss = None
1618
+ if return_loss:
1619
+ loss = align_loss(logits_per_text)
1620
+
1621
+ if not return_dict:
1622
+ output = (logits_per_image, logits_per_text, text_embeds, image_embeds, text_outputs, vision_outputs)
1623
+ return ((loss,) + output) if loss is not None else output
1624
+
1625
+ return AlignOutput(
1626
+ loss=loss,
1627
+ logits_per_image=logits_per_image,
1628
+ logits_per_text=logits_per_text,
1629
+ text_embeds=text_embeds,
1630
+ image_embeds=image_embeds,
1631
+ text_model_output=text_outputs,
1632
+ vision_model_output=vision_outputs,
1633
+ )
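Note for readers of this hunk: `align_loss`, called in `forward` above, is defined earlier in modeling_align.py and is not shown here. As a minimal sketch, assuming the standard CLIP-style symmetric cross-entropy over the similarity matrix that ALIGN uses:

```python
import torch
from torch import nn


def contrastive_loss(logits: torch.Tensor) -> torch.Tensor:
    # Cross-entropy against the diagonal: the i-th text matches the i-th image.
    return nn.functional.cross_entropy(logits, torch.arange(len(logits), device=logits.device))


def align_loss(similarity: torch.Tensor) -> torch.Tensor:
    # Symmetric loss over the text->image and image->text directions.
    caption_loss = contrastive_loss(similarity)
    image_loss = contrastive_loss(similarity.t())
    return (caption_loss + image_loss) / 2.0
```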
venv/lib/python3.10/site-packages/transformers/models/align/processing_align.py ADDED
@@ -0,0 +1,121 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """
16
+ Image/Text processor class for ALIGN
17
+ """
18
+
19
+
20
+ from ...processing_utils import ProcessorMixin
21
+ from ...tokenization_utils_base import BatchEncoding
22
+
23
+
24
+ class AlignProcessor(ProcessorMixin):
25
+ r"""
26
+ Constructs an ALIGN processor which wraps [`EfficientNetImageProcessor`] and
27
+ [`BertTokenizer`]/[`BertTokenizerFast`] into a single processor that inherits both the image processor and
28
+ tokenizer functionalities. See the [`~AlignProcessor.__call__`] and [`~AlignProcessor.decode`] for more
29
+ information.
30
+
31
+ Args:
32
+ image_processor ([`EfficientNetImageProcessor`]):
33
+ The image processor is a required input.
34
+ tokenizer ([`BertTokenizer`, `BertTokenizerFast`]):
35
+ The tokenizer is a required input.
36
+ """
37
+
38
+ attributes = ["image_processor", "tokenizer"]
39
+ image_processor_class = "EfficientNetImageProcessor"
40
+ tokenizer_class = ("BertTokenizer", "BertTokenizerFast")
41
+
42
+ def __init__(self, image_processor, tokenizer):
43
+ super().__init__(image_processor, tokenizer)
44
+
45
+ def __call__(self, text=None, images=None, padding="max_length", max_length=64, return_tensors=None, **kwargs):
46
+ """
47
+ Main method to prepare text(s) and image(s) to be fed as input to the model. This method forwards the `text`
48
+ and `kwargs` arguments to BertTokenizerFast's [`~BertTokenizerFast.__call__`] if `text` is not `None` to encode
49
+ the text. To prepare the image(s), this method forwards the `images` and `kwargs` arguments to
50
+ EfficientNetImageProcessor's [`~EfficientNetImageProcessor.__call__`] if `images` is not `None`. Please refer
51
+ to the docstring of the above two methods for more information.
52
+
53
+ Args:
54
+ text (`str`, `List[str]`):
55
+ The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
56
+ (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
57
+ `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
58
+ images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`, `List[torch.Tensor]`):
59
+ The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
60
+ tensor. Both channels-first and channels-last formats are supported.
61
+ padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `max_length`):
62
+ Activates and controls padding for tokenization of input text. Choose between [`True` or `'longest'`,
63
+ `'max_length'`, `False` or `'do_not_pad'`]
64
+ max_length (`int`, *optional*, defaults to 64):
65
+ Maximum sequence length to pad the input text to during tokenization.
66
+
67
+ return_tensors (`str` or [`~utils.TensorType`], *optional*):
68
+ If set, will return tensors of a particular framework. Acceptable values are:
69
+
70
+ - `'tf'`: Return TensorFlow `tf.constant` objects.
71
+ - `'pt'`: Return PyTorch `torch.Tensor` objects.
72
+ - `'np'`: Return NumPy `np.ndarray` objects.
73
+ - `'jax'`: Return JAX `jnp.ndarray` objects.
74
+
75
+ Returns:
76
+ [`BatchEncoding`]: A [`BatchEncoding`] with the following fields:
77
+
78
+ - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
79
+ - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
80
+ `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
81
+ `None`).
82
+ - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.
83
+ """
84
+ if text is None and images is None:
85
+ raise ValueError("You have to specify either text or images. Both cannot be none.")
86
+
87
+ if text is not None:
88
+ encoding = self.tokenizer(
89
+ text, padding=padding, max_length=max_length, return_tensors=return_tensors, **kwargs
90
+ )
91
+
92
+ if images is not None:
93
+ image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
94
+
95
+ if text is not None and images is not None:
96
+ encoding["pixel_values"] = image_features.pixel_values
97
+ return encoding
98
+ elif text is not None:
99
+ return encoding
100
+ else:
101
+ return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
102
+
103
+ def batch_decode(self, *args, **kwargs):
104
+ """
105
+ This method forwards all its arguments to BertTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please
106
+ refer to the docstring of this method for more information.
107
+ """
108
+ return self.tokenizer.batch_decode(*args, **kwargs)
109
+
110
+ def decode(self, *args, **kwargs):
111
+ """
112
+ This method forwards all its arguments to BertTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to
113
+ the docstring of this method for more information.
114
+ """
115
+ return self.tokenizer.decode(*args, **kwargs)
116
+
117
+ @property
118
+ def model_input_names(self):
119
+ tokenizer_input_names = self.tokenizer.model_input_names
120
+ image_processor_input_names = self.image_processor.model_input_names
121
+ return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
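A short usage sketch for the processor above, consistent with its `__call__` defaults (`padding="max_length"`, `max_length=64`); the checkpoint name follows the ALIGN examples earlier in this diff:

```python
from PIL import Image
import requests
from transformers import AlignProcessor

processor = AlignProcessor.from_pretrained("kakaobrain/align-base")

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

# Text is padded to 64 tokens by default; the image processor adds `pixel_values`.
inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt")
print(inputs["input_ids"].shape)     # torch.Size([1, 64])
print(inputs["pixel_values"].shape)  # (1, 3, H, W) at the processor's default resolution
```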
venv/lib/python3.10/site-packages/transformers/models/conditional_detr/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.42 kB).
 
venv/lib/python3.10/site-packages/transformers/models/conditional_detr/__pycache__/configuration_conditional_detr.cpython-310.pyc ADDED
Binary file (11.8 kB).
 
venv/lib/python3.10/site-packages/transformers/models/conditional_detr/__pycache__/convert_conditional_detr_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc ADDED
Binary file (9.32 kB).
 
venv/lib/python3.10/site-packages/transformers/models/conditional_detr/__pycache__/feature_extraction_conditional_detr.cpython-310.pyc ADDED
Binary file (1.43 kB).
 
venv/lib/python3.10/site-packages/transformers/models/conditional_detr/__pycache__/image_processing_conditional_detr.cpython-310.pyc ADDED
Binary file (59.3 kB).
 
venv/lib/python3.10/site-packages/transformers/models/conditional_detr/__pycache__/modeling_conditional_detr.cpython-310.pyc ADDED
Binary file (93.1 kB).
 
venv/lib/python3.10/site-packages/transformers/models/conditional_detr/configuration_conditional_detr.py ADDED
@@ -0,0 +1,273 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ Conditional DETR model configuration"""
16
+ from collections import OrderedDict
17
+ from typing import Mapping
18
+
19
+ from packaging import version
20
+
21
+ from ...configuration_utils import PretrainedConfig
22
+ from ...onnx import OnnxConfig
23
+ from ...utils import logging
24
+ from ..auto import CONFIG_MAPPING
25
+
26
+
27
+ logger = logging.get_logger(__name__)
28
+
29
+
30
+ from ..deprecated._archive_maps import CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
31
+
32
+
33
+ class ConditionalDetrConfig(PretrainedConfig):
34
+ r"""
35
+ This is the configuration class to store the configuration of a [`ConditionalDetrModel`]. It is used to instantiate
36
+ a Conditional DETR model according to the specified arguments, defining the model architecture. Instantiating a
37
+ configuration with the defaults will yield a similar configuration to that of the Conditional DETR
38
+ [microsoft/conditional-detr-resnet-50](https://huggingface.co/microsoft/conditional-detr-resnet-50) architecture.
39
+
40
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
41
+ documentation from [`PretrainedConfig`] for more information.
42
+
43
+ Args:
44
+ use_timm_backbone (`bool`, *optional*, defaults to `True`):
45
+ Whether or not to use the `timm` library for the backbone. If set to `False`, will use the [`AutoBackbone`]
46
+ API.
47
+ backbone_config (`PretrainedConfig` or `dict`, *optional*):
48
+ The configuration of the backbone model. Only used in case `use_timm_backbone` is set to `False` in which
49
+ case it will default to `ResNetConfig()`.
50
+ num_channels (`int`, *optional*, defaults to 3):
51
+ The number of input channels.
52
+ num_queries (`int`, *optional*, defaults to 300):
53
+ Number of object queries, i.e. detection slots. This is the maximal number of objects
54
+ [`ConditionalDetrModel`] can detect in a single image. For COCO, we recommend 300 queries.
55
+ d_model (`int`, *optional*, defaults to 256):
56
+ Dimension of the layers.
57
+ encoder_layers (`int`, *optional*, defaults to 6):
58
+ Number of encoder layers.
59
+ decoder_layers (`int`, *optional*, defaults to 6):
60
+ Number of decoder layers.
61
+ encoder_attention_heads (`int`, *optional*, defaults to 8):
62
+ Number of attention heads for each attention layer in the Transformer encoder.
63
+ decoder_attention_heads (`int`, *optional*, defaults to 8):
64
+ Number of attention heads for each attention layer in the Transformer decoder.
65
+ decoder_ffn_dim (`int`, *optional*, defaults to 2048):
66
+ Dimension of the "intermediate" (often named feed-forward) layer in decoder.
67
+ encoder_ffn_dim (`int`, *optional*, defaults to 2048):
68
+ Dimension of the "intermediate" (often named feed-forward) layer in encoder.
69
+ activation_function (`str` or `function`, *optional*, defaults to `"relu"`):
70
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
71
+ `"relu"`, `"silu"` and `"gelu_new"` are supported.
72
+ dropout (`float`, *optional*, defaults to 0.1):
73
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
74
+ attention_dropout (`float`, *optional*, defaults to 0.0):
75
+ The dropout ratio for the attention probabilities.
76
+ activation_dropout (`float`, *optional*, defaults to 0.0):
77
+ The dropout ratio for activations inside the fully connected layer.
78
+ init_std (`float`, *optional*, defaults to 0.02):
79
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
80
+ init_xavier_std (`float`, *optional*, defaults to 1):
81
+ The scaling factor used for the Xavier initialization gain in the HM Attention map module.
82
+ encoder_layerdrop (`float`, *optional*, defaults to 0.0):
83
+ The LayerDrop probability for the encoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556)
84
+ for more details.
85
+ decoder_layerdrop (`float`, *optional*, defaults to 0.0):
86
+ The LayerDrop probability for the decoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556)
87
+ for more details.
88
+ auxiliary_loss (`bool`, *optional*, defaults to `False`):
89
+ Whether auxiliary decoding losses (loss at each decoder layer) are to be used.
90
+ position_embedding_type (`str`, *optional*, defaults to `"sine"`):
91
+ Type of position embeddings to be used on top of the image features. One of `"sine"` or `"learned"`.
92
+ backbone (`str`, *optional*, defaults to `"resnet50"`):
93
+ Name of backbone to use when `backbone_config` is `None`. If `use_pretrained_backbone` is `True`, this
94
+ will load the corresponding pretrained weights from the timm or transformers library. If `use_pretrained_backbone`
95
+ is `False`, this loads the backbone's config and uses that to initialize the backbone with random weights.
96
+ use_pretrained_backbone (`bool`, *optional*, defaults to `True`):
97
+ Whether to use pretrained weights for the backbone.
98
+ backbone_kwargs (`dict`, *optional*):
99
+ Keyword arguments to be passed to AutoBackbone when loading from a checkpoint
100
+ e.g. `{'out_indices': (0, 1, 2, 3)}`. Cannot be specified if `backbone_config` is set.
101
+ dilation (`bool`, *optional*, defaults to `False`):
102
+ Whether to replace stride with dilation in the last convolutional block (DC5). Only supported when
103
+ `use_timm_backbone` = `True`.
104
+ class_cost (`float`, *optional*, defaults to 2):
105
+ Relative weight of the classification error in the Hungarian matching cost.
106
+ bbox_cost (`float`, *optional*, defaults to 5):
107
+ Relative weight of the L1 error of the bounding box coordinates in the Hungarian matching cost.
108
+ giou_cost (`float`, *optional*, defaults to 2):
109
+ Relative weight of the generalized IoU loss of the bounding box in the Hungarian matching cost.
110
+ mask_loss_coefficient (`float`, *optional*, defaults to 1):
111
+ Relative weight of the Focal loss in the panoptic segmentation loss.
112
+ dice_loss_coefficient (`float`, *optional*, defaults to 1):
113
+ Relative weight of the DICE/F-1 loss in the panoptic segmentation loss.
114
+ bbox_loss_coefficient (`float`, *optional*, defaults to 5):
115
+ Relative weight of the L1 bounding box loss in the object detection loss.
116
+ giou_loss_coefficient (`float`, *optional*, defaults to 2):
117
+ Relative weight of the generalized IoU loss in the object detection loss.
118
+ cls_loss_coefficient (`float`, *optional*, defaults to 2):
119
+ Relative weight of the classification loss in the object detection loss.
120
+ focal_alpha (`float`, *optional*, defaults to 0.25):
121
+ Alpha parameter in the focal loss.
122
+
123
+ Examples:
124
+
125
+ ```python
126
+ >>> from transformers import ConditionalDetrConfig, ConditionalDetrModel
127
+
128
+ >>> # Initializing a Conditional DETR microsoft/conditional-detr-resnet-50 style configuration
129
+ >>> configuration = ConditionalDetrConfig()
130
+
131
+ >>> # Initializing a model (with random weights) from the microsoft/conditional-detr-resnet-50 style configuration
132
+ >>> model = ConditionalDetrModel(configuration)
133
+
134
+ >>> # Accessing the model configuration
135
+ >>> configuration = model.config
136
+ ```"""
137
+
138
+ model_type = "conditional_detr"
139
+ keys_to_ignore_at_inference = ["past_key_values"]
140
+ attribute_map = {
141
+ "hidden_size": "d_model",
142
+ "num_attention_heads": "encoder_attention_heads",
143
+ }
144
+
145
+ def __init__(
146
+ self,
147
+ use_timm_backbone=True,
148
+ backbone_config=None,
149
+ num_channels=3,
150
+ num_queries=300,
151
+ encoder_layers=6,
152
+ encoder_ffn_dim=2048,
153
+ encoder_attention_heads=8,
154
+ decoder_layers=6,
155
+ decoder_ffn_dim=2048,
156
+ decoder_attention_heads=8,
157
+ encoder_layerdrop=0.0,
158
+ decoder_layerdrop=0.0,
159
+ is_encoder_decoder=True,
160
+ activation_function="relu",
161
+ d_model=256,
162
+ dropout=0.1,
163
+ attention_dropout=0.0,
164
+ activation_dropout=0.0,
165
+ init_std=0.02,
166
+ init_xavier_std=1.0,
167
+ auxiliary_loss=False,
168
+ position_embedding_type="sine",
169
+ backbone="resnet50",
170
+ use_pretrained_backbone=True,
171
+ backbone_kwargs=None,
172
+ dilation=False,
173
+ class_cost=2,
174
+ bbox_cost=5,
175
+ giou_cost=2,
176
+ mask_loss_coefficient=1,
177
+ dice_loss_coefficient=1,
178
+ cls_loss_coefficient=2,
179
+ bbox_loss_coefficient=5,
180
+ giou_loss_coefficient=2,
181
+ focal_alpha=0.25,
182
+ **kwargs,
183
+ ):
184
+ if not use_timm_backbone and use_pretrained_backbone:
185
+ raise ValueError(
186
+ "Loading pretrained backbone weights from the transformers library is not supported yet. `use_timm_backbone` must be set to `True` when `use_pretrained_backbone=True`"
187
+ )
188
+
189
+ if backbone_config is not None and backbone is not None:
190
+ raise ValueError("You can't specify both `backbone` and `backbone_config`.")
191
+
192
+ if backbone_config is not None and use_timm_backbone:
193
+ raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")
194
+
195
+ if backbone_kwargs is not None and backbone_kwargs and backbone_config is not None:
196
+ raise ValueError("You can't specify both `backbone_kwargs` and `backbone_config`.")
197
+
198
+ if not use_timm_backbone:
199
+ if backbone_config is None:
200
+ logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
201
+ backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
202
+ elif isinstance(backbone_config, dict):
203
+ backbone_model_type = backbone_config.get("model_type")
204
+ config_class = CONFIG_MAPPING[backbone_model_type]
205
+ backbone_config = config_class.from_dict(backbone_config)
206
+
207
+ self.use_timm_backbone = use_timm_backbone
208
+ self.backbone_config = backbone_config
209
+ self.num_channels = num_channels
210
+ self.num_queries = num_queries
211
+ self.d_model = d_model
212
+ self.encoder_ffn_dim = encoder_ffn_dim
213
+ self.encoder_layers = encoder_layers
214
+ self.encoder_attention_heads = encoder_attention_heads
215
+ self.decoder_ffn_dim = decoder_ffn_dim
216
+ self.decoder_layers = decoder_layers
217
+ self.decoder_attention_heads = decoder_attention_heads
218
+ self.dropout = dropout
219
+ self.attention_dropout = attention_dropout
220
+ self.activation_dropout = activation_dropout
221
+ self.activation_function = activation_function
222
+ self.init_std = init_std
223
+ self.init_xavier_std = init_xavier_std
224
+ self.encoder_layerdrop = encoder_layerdrop
225
+ self.decoder_layerdrop = decoder_layerdrop
226
+ self.num_hidden_layers = encoder_layers
227
+ self.auxiliary_loss = auxiliary_loss
228
+ self.position_embedding_type = position_embedding_type
229
+ self.backbone = backbone
230
+ self.use_pretrained_backbone = use_pretrained_backbone
231
+ self.backbone_kwargs = backbone_kwargs
232
+ self.dilation = dilation
233
+ # Hungarian matcher
234
+ self.class_cost = class_cost
235
+ self.bbox_cost = bbox_cost
236
+ self.giou_cost = giou_cost
237
+ # Loss coefficients
238
+ self.mask_loss_coefficient = mask_loss_coefficient
239
+ self.dice_loss_coefficient = dice_loss_coefficient
240
+ self.cls_loss_coefficient = cls_loss_coefficient
241
+ self.bbox_loss_coefficient = bbox_loss_coefficient
242
+ self.giou_loss_coefficient = giou_loss_coefficient
243
+ self.focal_alpha = focal_alpha
244
+ super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
245
+
246
+ @property
247
+ def num_attention_heads(self) -> int:
248
+ return self.encoder_attention_heads
249
+
250
+ @property
251
+ def hidden_size(self) -> int:
252
+ return self.d_model
253
+
254
+
255
+ class ConditionalDetrOnnxConfig(OnnxConfig):
256
+ torch_onnx_minimum_version = version.parse("1.11")
257
+
258
+ @property
259
+ def inputs(self) -> Mapping[str, Mapping[int, str]]:
260
+ return OrderedDict(
261
+ [
262
+ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
263
+ ("pixel_mask", {0: "batch"}),
264
+ ]
265
+ )
266
+
267
+ @property
268
+ def atol_for_validation(self) -> float:
269
+ return 1e-5
270
+
271
+ @property
272
+ def default_onnx_opset(self) -> int:
273
+ return 12
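A quick sketch of how the config above behaves, based solely on the defaults and `attribute_map` shown in this file:

```python
from transformers import ConditionalDetrConfig

config = ConditionalDetrConfig()
# `attribute_map` plus the properties above alias the common names onto DETR-style ones.
assert config.hidden_size == config.d_model == 256
assert config.num_attention_heads == config.encoder_attention_heads == 8
print(config.num_queries)  # 300 (the Conditional DETR default, vs. 100 in plain DETR)
```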
venv/lib/python3.10/site-packages/transformers/models/conditional_detr/image_processing_conditional_detr.py ADDED
@@ -0,0 +1,1777 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Image processor class for Conditional DETR."""
16
+
17
+ import io
18
+ import pathlib
19
+ from collections import defaultdict
20
+ from typing import Any, Callable, Dict, Iterable, List, Optional, Set, Tuple, Union
21
+
22
+ import numpy as np
23
+
24
+ from ...feature_extraction_utils import BatchFeature
25
+ from ...image_processing_utils import BaseImageProcessor, get_size_dict
26
+ from ...image_transforms import (
27
+ PaddingMode,
28
+ center_to_corners_format,
29
+ corners_to_center_format,
30
+ id_to_rgb,
31
+ pad,
32
+ rescale,
33
+ resize,
34
+ rgb_to_id,
35
+ to_channel_dimension_format,
36
+ )
37
+ from ...image_utils import (
38
+ IMAGENET_DEFAULT_MEAN,
39
+ IMAGENET_DEFAULT_STD,
40
+ AnnotationFormat,
41
+ AnnotationType,
42
+ ChannelDimension,
43
+ ImageInput,
44
+ PILImageResampling,
45
+ get_image_size,
46
+ infer_channel_dimension_format,
47
+ is_scaled_image,
48
+ make_list_of_images,
49
+ to_numpy_array,
50
+ valid_images,
51
+ validate_annotations,
52
+ validate_kwargs,
53
+ validate_preprocess_arguments,
54
+ )
55
+ from ...utils import (
56
+ TensorType,
57
+ is_flax_available,
58
+ is_jax_tensor,
59
+ is_scipy_available,
60
+ is_tf_available,
61
+ is_tf_tensor,
62
+ is_torch_available,
63
+ is_torch_tensor,
64
+ is_vision_available,
65
+ logging,
66
+ )
67
+
68
+
69
+ if is_torch_available():
70
+ import torch
71
+ from torch import nn
72
+
73
+
74
+ if is_vision_available():
75
+ import PIL
76
+
77
+
78
+ if is_scipy_available():
79
+ import scipy.special
80
+ import scipy.stats
81
+
82
+
83
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
84
+
85
+
86
+ SUPPORTED_ANNOTATION_FORMATS = (AnnotationFormat.COCO_DETECTION, AnnotationFormat.COCO_PANOPTIC)
87
+
88
+
89
+ # Copied from transformers.models.detr.image_processing_detr.get_size_with_aspect_ratio
90
+ def get_size_with_aspect_ratio(image_size, size, max_size=None) -> Tuple[int, int]:
91
+ """
92
+ Computes the output image size given the input image size and the desired output size.
93
+
94
+ Args:
95
+ image_size (`Tuple[int, int]`):
96
+ The input image size.
97
+ size (`int`):
98
+ The desired output size.
99
+ max_size (`int`, *optional*):
100
+ The maximum allowed output size.
101
+ """
102
+ height, width = image_size
103
+ if max_size is not None:
104
+ min_original_size = float(min((height, width)))
105
+ max_original_size = float(max((height, width)))
106
+ if max_original_size / min_original_size * size > max_size:
107
+ size = int(round(max_size * min_original_size / max_original_size))
108
+
109
+ if (height <= width and height == size) or (width <= height and width == size):
110
+ return height, width
111
+
112
+ if width < height:
113
+ ow = size
114
+ oh = int(size * height / width)
115
+ else:
116
+ oh = size
117
+ ow = int(size * width / height)
118
+ return (oh, ow)
119
+
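A worked example of the branching above (illustrative values, not part of the file):

```python
# A 480x640 (height, width) image, size=800, max_size=1333:
# 640 / 480 * 800 ≈ 1066.7 <= 1333, so `size` stays 800; the shorter side (height)
# becomes 800 and the width scales proportionally to int(800 * 640 / 480) = 1066.
print(get_size_with_aspect_ratio((480, 640), 800, max_size=1333))  # (800, 1066)
```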
120
+
121
+ # Copied from transformers.models.detr.image_processing_detr.get_resize_output_image_size
122
+ def get_resize_output_image_size(
123
+ input_image: np.ndarray,
124
+ size: Union[int, Tuple[int, int], List[int]],
125
+ max_size: Optional[int] = None,
126
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
127
+ ) -> Tuple[int, int]:
128
+ """
129
+ Computes the output image size given the input image size and the desired output size. If the desired output size
130
+ is a tuple or list, the output image size is returned as is. If the desired output size is an integer, the output
131
+ image size is computed by keeping the aspect ratio of the input image size.
132
+
133
+ Args:
134
+ input_image (`np.ndarray`):
135
+ The image to resize.
136
+ size (`int` or `Tuple[int, int]` or `List[int]`):
137
+ The desired output size.
138
+ max_size (`int`, *optional*):
139
+ The maximum allowed output size.
140
+ input_data_format (`ChannelDimension` or `str`, *optional*):
141
+ The channel dimension format of the input image. If not provided, it will be inferred from the input image.
142
+ """
143
+ image_size = get_image_size(input_image, input_data_format)
144
+ if isinstance(size, (list, tuple)):
145
+ return size
146
+
147
+ return get_size_with_aspect_ratio(image_size, size, max_size)
148
+
149
+
150
+ # Copied from transformers.models.detr.image_processing_detr.get_numpy_to_framework_fn
151
+ def get_numpy_to_framework_fn(arr) -> Callable:
152
+ """
153
+ Returns a function that converts a numpy array to the framework of the input array.
154
+
155
+ Args:
156
+ arr (`np.ndarray`): The array to convert.
157
+ """
158
+ if isinstance(arr, np.ndarray):
159
+ return np.array
160
+ if is_tf_available() and is_tf_tensor(arr):
161
+ import tensorflow as tf
162
+
163
+ return tf.convert_to_tensor
164
+ if is_torch_available() and is_torch_tensor(arr):
165
+ import torch
166
+
167
+ return torch.tensor
168
+ if is_flax_available() and is_jax_tensor(arr):
169
+ import jax.numpy as jnp
170
+
171
+ return jnp.array
172
+ raise ValueError(f"Cannot convert arrays of type {type(arr)}")
173
+
174
+
175
+ # Copied from transformers.models.detr.image_processing_detr.safe_squeeze
176
+ def safe_squeeze(arr: np.ndarray, axis: Optional[int] = None) -> np.ndarray:
177
+ """
178
+ Squeezes an array, but only if the axis specified has dim 1.
179
+ """
180
+ if axis is None:
181
+ return arr.squeeze()
182
+
183
+ try:
184
+ return arr.squeeze(axis=axis)
185
+ except ValueError:
186
+ return arr
187
+
188
+
189
+ # Copied from transformers.models.detr.image_processing_detr.normalize_annotation
190
+ def normalize_annotation(annotation: Dict, image_size: Tuple[int, int]) -> Dict:
191
+ image_height, image_width = image_size
192
+ norm_annotation = {}
193
+ for key, value in annotation.items():
194
+ if key == "boxes":
195
+ boxes = value
196
+ boxes = corners_to_center_format(boxes)
197
+ boxes /= np.asarray([image_width, image_height, image_width, image_height], dtype=np.float32)
198
+ norm_annotation[key] = boxes
199
+ else:
200
+ norm_annotation[key] = value
201
+ return norm_annotation
202
+
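For illustration, normalizing a single xyxy box with the function above (assuming the `corners_to_center_format` import at the top of the file):

```python
import numpy as np

# One (x_min, y_min, x_max, y_max) box on a 100x200 (height, width) image.
ann = {"boxes": np.array([[10.0, 20.0, 50.0, 60.0]])}
out = normalize_annotation(ann, image_size=(100, 200))
# corners -> (center_x, center_y, w, h) = (30, 40, 40, 40), then / (200, 100, 200, 100)
print(out["boxes"])  # [[0.15 0.4  0.2  0.4 ]]
```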
203
+
204
+ # Copied from transformers.models.detr.image_processing_detr.max_across_indices
205
+ def max_across_indices(values: Iterable[Any]) -> List[Any]:
206
+ """
207
+ Return the maximum value across all indices of an iterable of values.
208
+ """
209
+ return [max(values_i) for values_i in zip(*values)]
210
+
211
+
212
+ # Copied from transformers.models.detr.image_processing_detr.get_max_height_width
213
+ def get_max_height_width(
214
+ images: List[np.ndarray], input_data_format: Optional[Union[str, ChannelDimension]] = None
215
+ ) -> List[int]:
216
+ """
217
+ Get the maximum height and width across all images in a batch.
218
+ """
219
+ if input_data_format is None:
220
+ input_data_format = infer_channel_dimension_format(images[0])
221
+
222
+ if input_data_format == ChannelDimension.FIRST:
223
+ _, max_height, max_width = max_across_indices([img.shape for img in images])
224
+ elif input_data_format == ChannelDimension.LAST:
225
+ max_height, max_width, _ = max_across_indices([img.shape for img in images])
226
+ else:
227
+ raise ValueError(f"Invalid channel dimension format: {input_data_format}")
228
+ return (max_height, max_width)
229
+
230
+
231
+ # Copied from transformers.models.detr.image_processing_detr.make_pixel_mask
232
+ def make_pixel_mask(
233
+ image: np.ndarray, output_size: Tuple[int, int], input_data_format: Optional[Union[str, ChannelDimension]] = None
234
+ ) -> np.ndarray:
235
+ """
236
+ Make a pixel mask for the image, where 1 indicates a valid pixel and 0 indicates padding.
237
+
238
+ Args:
239
+ image (`np.ndarray`):
240
+ Image to make the pixel mask for.
241
+ output_size (`Tuple[int, int]`):
242
+ Output size of the mask.
243
+ """
244
+ input_height, input_width = get_image_size(image, channel_dim=input_data_format)
245
+ mask = np.zeros(output_size, dtype=np.int64)
246
+ mask[:input_height, :input_width] = 1
247
+ return mask
248
+
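An illustrative call to the function above:

```python
import numpy as np

# A (C, H, W) = (3, 2, 4) image padded onto a 4x5 canvas: the mask is 1 over the
# valid 2x4 region and 0 over the padding.
image = np.zeros((3, 2, 4))
print(make_pixel_mask(image, output_size=(4, 5)))
# [[1 1 1 1 0]
#  [1 1 1 1 0]
#  [0 0 0 0 0]
#  [0 0 0 0 0]]
```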
249
+
250
+ # Copied from transformers.models.detr.image_processing_detr.convert_coco_poly_to_mask
251
+ def convert_coco_poly_to_mask(segmentations, height: int, width: int) -> np.ndarray:
252
+ """
253
+ Convert a COCO polygon annotation to a mask.
254
+
255
+ Args:
256
+ segmentations (`List[List[float]]`):
257
+ List of polygons, each polygon represented by a list of x-y coordinates.
258
+ height (`int`):
259
+ Height of the mask.
260
+ width (`int`):
261
+ Width of the mask.
262
+ """
263
+ try:
264
+ from pycocotools import mask as coco_mask
265
+ except ImportError:
266
+ raise ImportError("Pycocotools is not installed in your environment.")
267
+
268
+ masks = []
269
+ for polygons in segmentations:
270
+ rles = coco_mask.frPyObjects(polygons, height, width)
271
+ mask = coco_mask.decode(rles)
272
+ if len(mask.shape) < 3:
273
+ mask = mask[..., None]
274
+ mask = np.asarray(mask, dtype=np.uint8)
275
+ mask = np.any(mask, axis=2)
276
+ masks.append(mask)
277
+ if masks:
278
+ masks = np.stack(masks, axis=0)
279
+ else:
280
+ masks = np.zeros((0, height, width), dtype=np.uint8)
281
+
282
+ return masks
283
+
284
+
285
+ # Copied from transformers.models.detr.image_processing_detr.prepare_coco_detection_annotation with DETR->ConditionalDetr
286
+ def prepare_coco_detection_annotation(
287
+ image,
288
+ target,
289
+ return_segmentation_masks: bool = False,
290
+ input_data_format: Optional[Union[ChannelDimension, str]] = None,
291
+ ):
292
+ """
293
+ Convert the target in COCO format into the format expected by ConditionalDetr.
294
+ """
295
+ image_height, image_width = get_image_size(image, channel_dim=input_data_format)
296
+
297
+ image_id = target["image_id"]
298
+ image_id = np.asarray([image_id], dtype=np.int64)
299
+
300
+ # Get all COCO annotations for the given image.
301
+ annotations = target["annotations"]
302
+ annotations = [obj for obj in annotations if "iscrowd" not in obj or obj["iscrowd"] == 0]
303
+
304
+ classes = [obj["category_id"] for obj in annotations]
305
+ classes = np.asarray(classes, dtype=np.int64)
306
+
307
+ # for conversion to coco api
308
+ area = np.asarray([obj["area"] for obj in annotations], dtype=np.float32)
309
+ iscrowd = np.asarray([obj["iscrowd"] if "iscrowd" in obj else 0 for obj in annotations], dtype=np.int64)
310
+
311
+ boxes = [obj["bbox"] for obj in annotations]
312
+ # guard against no boxes via resizing
313
+ boxes = np.asarray(boxes, dtype=np.float32).reshape(-1, 4)
314
+ boxes[:, 2:] += boxes[:, :2]
315
+ boxes[:, 0::2] = boxes[:, 0::2].clip(min=0, max=image_width)
316
+ boxes[:, 1::2] = boxes[:, 1::2].clip(min=0, max=image_height)
317
+
318
+ keep = (boxes[:, 3] > boxes[:, 1]) & (boxes[:, 2] > boxes[:, 0])
319
+
320
+ new_target = {}
321
+ new_target["image_id"] = image_id
322
+ new_target["class_labels"] = classes[keep]
323
+ new_target["boxes"] = boxes[keep]
324
+ new_target["area"] = area[keep]
325
+ new_target["iscrowd"] = iscrowd[keep]
326
+ new_target["orig_size"] = np.asarray([int(image_height), int(image_width)], dtype=np.int64)
327
+
328
+ if annotations and "keypoints" in annotations[0]:
329
+ keypoints = [obj["keypoints"] for obj in annotations]
330
+ # Converting the filtered keypoints list to a numpy array
331
+ keypoints = np.asarray(keypoints, dtype=np.float32)
332
+ # Apply the keep mask here to filter the relevant annotations
333
+ keypoints = keypoints[keep]
334
+ num_keypoints = keypoints.shape[0]
335
+ keypoints = keypoints.reshape((-1, 3)) if num_keypoints else keypoints
336
+ new_target["keypoints"] = keypoints
337
+
338
+ if return_segmentation_masks:
339
+ segmentation_masks = [obj["segmentation"] for obj in annotations]
340
+ masks = convert_coco_poly_to_mask(segmentation_masks, image_height, image_width)
341
+ new_target["masks"] = masks[keep]
342
+
343
+ return new_target
344
+
345
+
346
+ # Copied from transformers.models.detr.image_processing_detr.masks_to_boxes
347
+ def masks_to_boxes(masks: np.ndarray) -> np.ndarray:
348
+ """
349
+ Compute the bounding boxes around the provided panoptic segmentation masks.
350
+
351
+ Args:
352
+ masks: masks in format `[number_masks, height, width]`, where `number_masks` is the number of masks
353
+
354
+ Returns:
355
+ boxes: bounding boxes in format `[number_masks, 4]` in xyxy format
356
+ """
357
+ if masks.size == 0:
358
+ return np.zeros((0, 4))
359
+
360
+ h, w = masks.shape[-2:]
361
+ y = np.arange(0, h, dtype=np.float32)
362
+ x = np.arange(0, w, dtype=np.float32)
363
+ # see https://github.com/pytorch/pytorch/issues/50276
364
+ y, x = np.meshgrid(y, x, indexing="ij")
365
+
366
+ x_mask = masks * np.expand_dims(x, axis=0)
367
+ x_max = x_mask.reshape(x_mask.shape[0], -1).max(-1)
368
+ x = np.ma.array(x_mask, mask=~(np.array(masks, dtype=bool)))
369
+ x_min = x.filled(fill_value=1e8)
370
+ x_min = x_min.reshape(x_min.shape[0], -1).min(-1)
371
+
372
+ y_mask = masks * np.expand_dims(y, axis=0)
373
+ y_max = y_mask.reshape(x_mask.shape[0], -1).max(-1)
374
+ y = np.ma.array(y_mask, mask=~(np.array(masks, dtype=bool)))
375
+ y_min = y.filled(fill_value=1e8)
376
+ y_min = y_min.reshape(y_min.shape[0], -1).min(-1)
377
+
378
+ return np.stack([x_min, y_min, x_max, y_max], 1)
379
+
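An illustrative call to the function above; note the maxima are inclusive pixel indices, a quirk of the masked-grid computation:

```python
import numpy as np

# One mask covering rows 1-2 and columns 2-4 of a 5x6 grid.
mask = np.zeros((1, 5, 6), dtype=np.uint8)
mask[0, 1:3, 2:5] = 1
print(masks_to_boxes(mask))  # [[2. 1. 4. 2.]] as (x_min, y_min, x_max, y_max)
```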
380
+
381
+ # Copied from transformers.models.detr.image_processing_detr.prepare_coco_panoptic_annotation with DETR->ConditionalDetr
382
+ def prepare_coco_panoptic_annotation(
383
+ image: np.ndarray,
384
+ target: Dict,
385
+ masks_path: Union[str, pathlib.Path],
386
+ return_masks: bool = True,
387
+ input_data_format: Union[ChannelDimension, str] = None,
388
+ ) -> Dict:
389
+ """
390
+ Prepare a coco panoptic annotation for ConditionalDetr.
391
+ """
392
+ image_height, image_width = get_image_size(image, channel_dim=input_data_format)
393
+ annotation_path = pathlib.Path(masks_path) / target["file_name"]
394
+
395
+ new_target = {}
396
+ new_target["image_id"] = np.asarray([target["image_id"] if "image_id" in target else target["id"]], dtype=np.int64)
397
+ new_target["size"] = np.asarray([image_height, image_width], dtype=np.int64)
398
+ new_target["orig_size"] = np.asarray([image_height, image_width], dtype=np.int64)
399
+
400
+ if "segments_info" in target:
401
+ masks = np.asarray(PIL.Image.open(annotation_path), dtype=np.uint32)
402
+ masks = rgb_to_id(masks)
403
+
404
+ ids = np.array([segment_info["id"] for segment_info in target["segments_info"]])
405
+ masks = masks == ids[:, None, None]
406
+ masks = masks.astype(np.uint8)
407
+ if return_masks:
408
+ new_target["masks"] = masks
409
+ new_target["boxes"] = masks_to_boxes(masks)
410
+ new_target["class_labels"] = np.array(
411
+ [segment_info["category_id"] for segment_info in target["segments_info"]], dtype=np.int64
412
+ )
413
+ new_target["iscrowd"] = np.asarray(
414
+ [segment_info["iscrowd"] for segment_info in target["segments_info"]], dtype=np.int64
415
+ )
416
+ new_target["area"] = np.asarray(
417
+ [segment_info["area"] for segment_info in target["segments_info"]], dtype=np.float32
418
+ )
419
+
420
+ return new_target
421
+
422
+
423
+ # Copied from transformers.models.detr.image_processing_detr.get_segmentation_image
424
+ def get_segmentation_image(
425
+ masks: np.ndarray, input_size: Tuple, target_size: Tuple, stuff_equiv_classes, deduplicate=False
426
+ ):
427
+ h, w = input_size
428
+ final_h, final_w = target_size
429
+
430
+ m_id = scipy.special.softmax(masks.transpose(0, 1), -1)
431
+
432
+ if m_id.shape[-1] == 0:
433
+ # We didn't detect any mask :(
434
+ m_id = np.zeros((h, w), dtype=np.int64)
435
+ else:
436
+ m_id = m_id.argmax(-1).reshape(h, w)
437
+
438
+ if deduplicate:
439
+ # Merge the masks corresponding to the same stuff class
440
+ for equiv in stuff_equiv_classes.values():
441
+ for eq_id in equiv:
442
+ m_id[m_id == eq_id] = equiv[0]
443
+
444
+ seg_img = id_to_rgb(m_id)
445
+ seg_img = resize(seg_img, (final_w, final_h), resample=PILImageResampling.NEAREST)
446
+ return seg_img
447
+
448
+
449
+ # Copied from transformers.models.detr.image_processing_detr.get_mask_area
450
+ def get_mask_area(seg_img: np.ndarray, target_size: Tuple[int, int], n_classes: int) -> np.ndarray:
451
+ final_h, final_w = target_size
452
+ np_seg_img = seg_img.astype(np.uint8)
453
+ np_seg_img = np_seg_img.reshape(final_h, final_w, 3)
454
+ m_id = rgb_to_id(np_seg_img)
455
+ area = [(m_id == i).sum() for i in range(n_classes)]
456
+ return area
457
+
458
+
459
+ # Copied from transformers.models.detr.image_processing_detr.score_labels_from_class_probabilities
460
+ def score_labels_from_class_probabilities(logits: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
461
+ probs = scipy.special.softmax(logits, axis=-1)
462
+ labels = probs.argmax(-1, keepdims=True)
463
+ scores = np.take_along_axis(probs, labels, axis=-1)
464
+ scores, labels = scores.squeeze(-1), labels.squeeze(-1)
465
+ return scores, labels
466
+
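For illustration (this relies on the `scipy` import guarded above):

```python
import numpy as np

# Two queries over three classes: softmax per row, then argmax and its probability.
logits = np.array([[2.0, 1.0, 0.0], [0.0, 0.0, 5.0]])
scores, labels = score_labels_from_class_probabilities(logits)
print(labels)           # [0 2]
print(scores.round(3))  # [0.665 0.987]
```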
467
+
468
+ # Copied from transformers.models.detr.image_processing_detr.post_process_panoptic_sample with DetrForSegmentation->ConditionalDetrForSegmentation
469
+ def post_process_panoptic_sample(
470
+ out_logits: np.ndarray,
471
+ masks: np.ndarray,
472
+ boxes: np.ndarray,
473
+ processed_size: Tuple[int, int],
474
+ target_size: Tuple[int, int],
475
+ is_thing_map: Dict,
476
+ threshold=0.85,
477
+ ) -> Dict:
478
+ """
479
+ Converts the output of [`ConditionalDetrForSegmentation`] into panoptic segmentation predictions for a single sample.
480
+
481
+ Args:
482
+ out_logits (`np.ndarray`):
484
+ The logits for this sample.
485
+ masks (`np.ndarray`):
486
+ The predicted segmentation masks for this sample.
487
+ boxes (`np.ndarray`):
488
+ The predicted bounding boxes for this sample. The boxes are in the normalized format `(center_x, center_y,
489
+ width, height)` with values in `[0, 1]`, relative to the size of the image (disregarding padding).
489
+ processed_size (`Tuple[int, int]`):
490
+ The processed size of the image `(height, width)`, as returned by the preprocessing step i.e. the size
491
+ after data augmentation but before batching.
492
+ target_size (`Tuple[int, int]`):
493
+ The target size of the image, `(height, width)` corresponding to the requested final size of the
494
+ prediction.
495
+ is_thing_map (`Dict`):
496
+ A dictionary mapping class indices to a boolean value indicating whether the class is a thing or not.
497
+ threshold (`float`, *optional*, defaults to 0.85):
498
+ The threshold used to binarize the segmentation masks.
499
+ """
500
+ # We filter out empty queries and detections below the threshold
501
+ scores, labels = score_labels_from_class_probabilities(out_logits)
502
+ keep = (labels != out_logits.shape[-1] - 1) & (scores > threshold)
503
+
504
+ cur_scores = scores[keep]
505
+ cur_classes = labels[keep]
506
+ cur_boxes = center_to_corners_format(boxes[keep])
507
+
508
+ if len(cur_boxes) != len(cur_classes):
509
+ raise ValueError("Not as many boxes as there are classes")
510
+
511
+ cur_masks = masks[keep]
512
+ cur_masks = resize(cur_masks[:, None], processed_size, resample=PILImageResampling.BILINEAR)
513
+ cur_masks = safe_squeeze(cur_masks, 1)
514
+ b, h, w = cur_masks.shape
515
+
516
+ # It may be that we have several predicted masks for the same stuff class.
517
+ # In the following, we track the list of mask ids for each stuff class (they are merged later on)
518
+ cur_masks = cur_masks.reshape(b, -1)
519
+ stuff_equiv_classes = defaultdict(list)
520
+ for k, label in enumerate(cur_classes):
521
+ if not is_thing_map[label]:
522
+ stuff_equiv_classes[label].append(k)
523
+
524
+ seg_img = get_segmentation_image(cur_masks, processed_size, target_size, stuff_equiv_classes, deduplicate=True)
525
+ area = get_mask_area(cur_masks, processed_size, n_classes=len(cur_scores))
526
+
527
+ # We filter out any mask that is too small
528
+ if cur_classes.size > 0:  # `size` is an attribute on NumPy arrays, not a method
529
+ # We now filter out empty masks, as long as some remain
530
+ filtered_small = np.array([a <= 4 for a in area], dtype=bool)
531
+ while filtered_small.any():
532
+ cur_masks = cur_masks[~filtered_small]
533
+ cur_scores = cur_scores[~filtered_small]
534
+ cur_classes = cur_classes[~filtered_small]
535
+ seg_img = get_segmentation_image(cur_masks, (h, w), target_size, stuff_equiv_classes, deduplicate=True)
536
+ area = get_mask_area(seg_img, target_size, n_classes=len(cur_scores))
537
+ filtered_small = np.array([a <= 4 for a in area], dtype=bool)
538
+ else:
539
+ cur_classes = np.ones((1, 1), dtype=np.int64)
540
+
541
+ segments_info = [
542
+ {"id": i, "isthing": is_thing_map[cat], "category_id": int(cat), "area": a}
543
+ for i, (cat, a) in enumerate(zip(cur_classes, area))
544
+ ]
545
+ del cur_classes
546
+
547
+ with io.BytesIO() as out:
548
+ PIL.Image.fromarray(seg_img).save(out, format="PNG")
549
+ predictions = {"png_string": out.getvalue(), "segments_info": segments_info}
550
+
551
+ return predictions
552
+
553
+
554
+ # Copied from transformers.models.detr.image_processing_detr.resize_annotation
555
+ def resize_annotation(
556
+ annotation: Dict[str, Any],
557
+ orig_size: Tuple[int, int],
558
+ target_size: Tuple[int, int],
559
+ threshold: float = 0.5,
560
+ resample: PILImageResampling = PILImageResampling.NEAREST,
561
+ ):
562
+ """
563
+ Resizes an annotation to a target size.
564
+
565
+ Args:
566
+ annotation (`Dict[str, Any]`):
567
+ The annotation dictionary.
568
+ orig_size (`Tuple[int, int]`):
569
+ The original size of the input image.
570
+ target_size (`Tuple[int, int]`):
571
+ The target size of the image, as returned by the preprocessing `resize` step.
572
+ threshold (`float`, *optional*, defaults to 0.5):
573
+ The threshold used to binarize the segmentation masks.
574
+ resample (`PILImageResampling`, defaults to `PILImageResampling.NEAREST`):
575
+ The resampling filter to use when resizing the masks.
576
+ """
577
+ ratios = tuple(float(s) / float(s_orig) for s, s_orig in zip(target_size, orig_size))
578
+ ratio_height, ratio_width = ratios
579
+
580
+ new_annotation = {}
581
+ new_annotation["size"] = target_size
582
+
583
+ for key, value in annotation.items():
584
+ if key == "boxes":
585
+ boxes = value
586
+ scaled_boxes = boxes * np.asarray([ratio_width, ratio_height, ratio_width, ratio_height], dtype=np.float32)
587
+ new_annotation["boxes"] = scaled_boxes
588
+ elif key == "area":
589
+ area = value
590
+ scaled_area = area * (ratio_width * ratio_height)
591
+ new_annotation["area"] = scaled_area
592
+ elif key == "masks":
593
+ masks = value[:, None]
594
+ masks = np.array([resize(mask, target_size, resample=resample) for mask in masks])
595
+ masks = masks.astype(np.float32)
596
+ masks = masks[:, 0] > threshold
597
+ new_annotation["masks"] = masks
598
+ elif key == "size":
599
+ new_annotation["size"] = target_size
600
+ else:
601
+ new_annotation[key] = value
602
+
603
+ return new_annotation
604
+
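A worked example of the scaling above (illustrative):

```python
import numpy as np

# Resizing a 100x100 annotation to 200x400 (height x2, width x4): boxes scale
# per-axis and areas scale by the product of the ratios (x8).
ann = {"boxes": np.array([[10.0, 10.0, 20.0, 20.0]]), "area": np.array([100.0])}
out = resize_annotation(ann, orig_size=(100, 100), target_size=(200, 400))
print(out["boxes"])  # [[40. 20. 80. 40.]]
print(out["area"])   # [800.]
```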
605
+
606
# Copied from transformers.models.detr.image_processing_detr.binary_mask_to_rle
def binary_mask_to_rle(mask):
    """
    Converts given binary mask of shape `(height, width)` to the run-length encoding (RLE) format.

    Args:
        mask (`torch.Tensor` or `numpy.array`):
            A binary mask tensor of shape `(height, width)` where 0 denotes background and 1 denotes the target
            segment_id or class_id.
    Returns:
        `List`: Run-length encoded list of the binary mask. Refer to COCO API for more information about the RLE
        format.
    """
    if is_torch_tensor(mask):
        mask = mask.numpy()

    pixels = mask.flatten()
    pixels = np.concatenate([[0], pixels, [0]])
    runs = np.where(pixels[1:] != pixels[:-1])[0] + 1
    runs[1::2] -= runs[::2]
    return list(runs)

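# Editor's note: a worked example of the encoding above (illustrative, not part of the
# module). For a 2x2 mask [[0, 1], [1, 1]], the flattened pixels are [0, 1, 1, 1]; the single
# run of ones starts at (1-based) position 2 and has length 3, so:
#
#     binary_mask_to_rle(np.array([[0, 1], [1, 1]]))  # -> [2, 3]
#
# Note that runs are computed over a C-order (row-major) flattening of the mask.
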
# Copied from transformers.models.detr.image_processing_detr.convert_segmentation_to_rle
def convert_segmentation_to_rle(segmentation):
    """
    Converts given segmentation map of shape `(height, width)` to the run-length encoding (RLE) format.

    Args:
        segmentation (`torch.Tensor` or `numpy.array`):
            A segmentation map of shape `(height, width)` where each value denotes a segment or class id.
    Returns:
        `List[List]`: A list of lists, where each list is the run-length encoding of a segment / class id.
    """
    segment_ids = torch.unique(segmentation)

    run_length_encodings = []
    for idx in segment_ids:
        mask = torch.where(segmentation == idx, 1, 0)
        rle = binary_mask_to_rle(mask)
        run_length_encodings.append(rle)

    return run_length_encodings

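# Editor's note: illustrative sketch (not part of the module). Each unique id in the map
# gets its own binary-mask RLE, in the order returned by `torch.unique` (ascending ids):
#
#     seg = torch.tensor([[0, 0], [1, 1]])
#     convert_segmentation_to_rle(seg)  # -> [[1, 2], [3, 2]], one RLE per segment id 0, 1
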
# Copied from transformers.models.detr.image_processing_detr.remove_low_and_no_objects
def remove_low_and_no_objects(masks, scores, labels, object_mask_threshold, num_labels):
    """
    Binarizes the given masks using `object_mask_threshold` and returns the associated values of `masks`, `scores`
    and `labels`.

    Args:
        masks (`torch.Tensor`):
            A tensor of shape `(num_queries, height, width)`.
        scores (`torch.Tensor`):
            A tensor of shape `(num_queries)`.
        labels (`torch.Tensor`):
            A tensor of shape `(num_queries)`.
        object_mask_threshold (`float`):
            A number between 0 and 1 used to binarize the masks.
    Raises:
        `ValueError`: Raised when the first dimension doesn't match in all input tensors.
    Returns:
        `Tuple[torch.Tensor, torch.Tensor, torch.Tensor]`: The `masks`, `scores` and `labels` for the queries whose
        predicted class is not the null class and whose score is above `object_mask_threshold`.
    """
    if not (masks.shape[0] == scores.shape[0] == labels.shape[0]):
        raise ValueError("masks, scores and labels must have the same first dimension!")

    to_keep = labels.ne(num_labels) & (scores > object_mask_threshold)

    return masks[to_keep], scores[to_keep], labels[to_keep]

# Copied from transformers.models.detr.image_processing_detr.check_segment_validity
def check_segment_validity(mask_labels, mask_probs, k, mask_threshold=0.5, overlap_mask_area_threshold=0.8):
    # Get the mask associated with class k
    mask_k = mask_labels == k
    mask_k_area = mask_k.sum()

    # Compute the area of all the stuff in query k
    original_area = (mask_probs[k] >= mask_threshold).sum()
    mask_exists = mask_k_area > 0 and original_area > 0

    # Eliminate disconnected tiny segments
    if mask_exists:
        area_ratio = mask_k_area / original_area
        if not area_ratio.item() > overlap_mask_area_threshold:
            mask_exists = False

    return mask_exists, mask_k

# Copied from transformers.models.detr.image_processing_detr.compute_segments
def compute_segments(
    mask_probs,
    pred_scores,
    pred_labels,
    mask_threshold: float = 0.5,
    overlap_mask_area_threshold: float = 0.8,
    label_ids_to_fuse: Optional[Set[int]] = None,
    target_size: Tuple[int, int] = None,
):
    height = mask_probs.shape[1] if target_size is None else target_size[0]
    width = mask_probs.shape[2] if target_size is None else target_size[1]

    segmentation = torch.zeros((height, width), dtype=torch.int32, device=mask_probs.device)
    segments: List[Dict] = []

    if target_size is not None:
        mask_probs = nn.functional.interpolate(
            mask_probs.unsqueeze(0), size=target_size, mode="bilinear", align_corners=False
        )[0]

    current_segment_id = 0

    # Weigh each mask by its prediction score
    mask_probs *= pred_scores.view(-1, 1, 1)
    mask_labels = mask_probs.argmax(0)  # [height, width]

    # Keep track of instances of each class
    stuff_memory_list: Dict[int, int] = {}
    for k in range(pred_labels.shape[0]):
        pred_class = pred_labels[k].item()
        should_fuse = pred_class in label_ids_to_fuse

        # Check if the mask exists and is large enough to be a segment
        mask_exists, mask_k = check_segment_validity(
            mask_labels, mask_probs, k, mask_threshold, overlap_mask_area_threshold
        )

        if mask_exists:
            if pred_class in stuff_memory_list:
                current_segment_id = stuff_memory_list[pred_class]
            else:
                current_segment_id += 1

            # Add current object segment to final segmentation map
            segmentation[mask_k] = current_segment_id
            segment_score = round(pred_scores[k].item(), 6)
            segments.append(
                {
                    "id": current_segment_id,
                    "label_id": pred_class,
                    "was_fused": should_fuse,
                    "score": segment_score,
                }
            )
            if should_fuse:
                stuff_memory_list[pred_class] = current_segment_id

    return segmentation, segments

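# Editor's note: a schematic of how the helpers above compose (illustrative, not part of the
# module). Shapes below are hypothetical:
#
#     mask_probs = torch.rand(5, 32, 32)        # 5 query masks over a 32x32 grid
#     pred_scores = torch.rand(5)
#     pred_labels = torch.randint(0, 10, (5,))
#     segmentation, segments = compute_segments(
#         mask_probs, pred_scores, pred_labels, label_ids_to_fuse=set()
#     )
#     # `segmentation` is an int32 [32, 32] map of segment ids (0 = no segment); `segments`
#     # holds one dict per kept query with its id, label_id, was_fused flag and score.
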
class ConditionalDetrImageProcessor(BaseImageProcessor):
    r"""
    Constructs a Conditional Detr image processor.

    Args:
        format (`str`, *optional*, defaults to `"coco_detection"`):
            Data format of the annotations. One of "coco_detection" or "coco_panoptic".
        do_resize (`bool`, *optional*, defaults to `True`):
            Controls whether to resize the image's (height, width) dimensions to the specified `size`. Can be
            overridden by the `do_resize` parameter in the `preprocess` method.
        size (`Dict[str, int]`, *optional*, defaults to `{"shortest_edge": 800, "longest_edge": 1333}`):
            Size of the image's (height, width) dimensions after resizing. Can be overridden by the `size` parameter
            in the `preprocess` method.
        resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):
            Resampling filter to use if resizing the image.
        do_rescale (`bool`, *optional*, defaults to `True`):
            Controls whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the
            `do_rescale` parameter in the `preprocess` method.
        rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
            Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the
            `preprocess` method.
        do_normalize (`bool`, *optional*, defaults to `True`):
            Controls whether to normalize the image. Can be overridden by the `do_normalize` parameter in the
            `preprocess` method.
        image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_DEFAULT_MEAN`):
            Mean values to use when normalizing the image. Can be a single value or a list of values, one for each
            channel. Can be overridden by the `image_mean` parameter in the `preprocess` method.
        image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_DEFAULT_STD`):
            Standard deviation values to use when normalizing the image. Can be a single value or a list of values, one
            for each channel. Can be overridden by the `image_std` parameter in the `preprocess` method.
        do_convert_annotations (`bool`, *optional*, defaults to `True`):
            Controls whether to convert the annotations to the format expected by the DETR model. Converts the
            bounding boxes to the format `(center_x, center_y, width, height)` and in the range `[0, 1]`.
            Can be overridden by the `do_convert_annotations` parameter in the `preprocess` method.
        do_pad (`bool`, *optional*, defaults to `True`):
            Controls whether to pad the image. Can be overridden by the `do_pad` parameter in the `preprocess`
            method. If `True`, will pad the images in the batch to the largest height and width in the batch.
            Padding will be applied to the bottom and right of the image with zeros.
    """

    model_input_names = ["pixel_values", "pixel_mask"]

    # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.__init__
    def __init__(
        self,
        format: Union[str, AnnotationFormat] = AnnotationFormat.COCO_DETECTION,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Union[float, List[float]] = None,
        image_std: Union[float, List[float]] = None,
        do_convert_annotations: Optional[bool] = None,
        do_pad: bool = True,
        **kwargs,
    ) -> None:
        if "pad_and_return_pixel_mask" in kwargs:
            do_pad = kwargs.pop("pad_and_return_pixel_mask")

        if "max_size" in kwargs:
            logger.warning_once(
                "The `max_size` parameter is deprecated and will be removed in v4.26. "
                "Please specify in `size['longest_edge']` instead.",
            )
            max_size = kwargs.pop("max_size")
        else:
            max_size = None if size is None else 1333

        size = size if size is not None else {"shortest_edge": 800, "longest_edge": 1333}
        size = get_size_dict(size, max_size=max_size, default_to_square=False)

        # Backwards compatibility
        if do_convert_annotations is None:
            do_convert_annotations = do_normalize

        super().__init__(**kwargs)
        self.format = format
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.do_convert_annotations = do_convert_annotations
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
        self.do_pad = do_pad
        self._valid_processor_keys = [
            "images",
            "annotations",
            "return_segmentation_masks",
            "masks_path",
            "do_resize",
            "size",
            "resample",
            "do_rescale",
            "rescale_factor",
            "do_normalize",
            "do_convert_annotations",
            "image_mean",
            "image_std",
            "do_pad",
            "format",
            "return_tensors",
            "data_format",
            "input_data_format",
        ]

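    # Editor's note: an illustrative construction sketch (not part of the class). With no
    # arguments, images are resized so the shortest edge is 800 (longest edge capped at 1333),
    # rescaled to [0, 1] and normalized with the ImageNet mean/std:
    #
    #     image_processor = ConditionalDetrImageProcessor()
    #     image_processor = ConditionalDetrImageProcessor(size={"height": 512, "width": 512}, do_pad=False)
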
    @classmethod
    # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.from_dict with Detr->ConditionalDetr
    def from_dict(cls, image_processor_dict: Dict[str, Any], **kwargs):
        """
        Overrides the `from_dict` method from the base class to make sure parameters are updated if the image
        processor is created using `from_dict` and kwargs, e.g.
        `ConditionalDetrImageProcessor.from_pretrained(checkpoint, size=600, max_size=800)`.
        """
        image_processor_dict = image_processor_dict.copy()
        if "max_size" in kwargs:
            image_processor_dict["max_size"] = kwargs.pop("max_size")
        if "pad_and_return_pixel_mask" in kwargs:
            image_processor_dict["pad_and_return_pixel_mask"] = kwargs.pop("pad_and_return_pixel_mask")
        return super().from_dict(image_processor_dict, **kwargs)

    # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.prepare_annotation with DETR->ConditionalDetr
    def prepare_annotation(
        self,
        image: np.ndarray,
        target: Dict,
        format: Optional[AnnotationFormat] = None,
        return_segmentation_masks: bool = None,
        masks_path: Optional[Union[str, pathlib.Path]] = None,
        input_data_format: Optional[Union[str, ChannelDimension]] = None,
    ) -> Dict:
        """
        Prepare an annotation for feeding into the ConditionalDetr model.
        """
        format = format if format is not None else self.format

        if format == AnnotationFormat.COCO_DETECTION:
            return_segmentation_masks = False if return_segmentation_masks is None else return_segmentation_masks
            target = prepare_coco_detection_annotation(
                image, target, return_segmentation_masks, input_data_format=input_data_format
            )
        elif format == AnnotationFormat.COCO_PANOPTIC:
            return_segmentation_masks = True if return_segmentation_masks is None else return_segmentation_masks
            target = prepare_coco_panoptic_annotation(
                image,
                target,
                masks_path=masks_path,
                return_masks=return_segmentation_masks,
                input_data_format=input_data_format,
            )
        else:
            raise ValueError(f"Format {format} is not supported.")
        return target

    # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.prepare
    def prepare(self, image, target, return_segmentation_masks=None, masks_path=None):
        logger.warning_once(
            "The `prepare` method is deprecated and will be removed in v4.33. "
            "Please use `prepare_annotation` instead. Note: the `prepare_annotation` method "
            "does not return the image anymore.",
        )
        target = self.prepare_annotation(image, target, return_segmentation_masks, masks_path, self.format)
        return image, target

    # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.convert_coco_poly_to_mask
    def convert_coco_poly_to_mask(self, *args, **kwargs):
        logger.warning_once("The `convert_coco_poly_to_mask` method is deprecated and will be removed in v4.33.")
        return convert_coco_poly_to_mask(*args, **kwargs)

    # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.prepare_coco_detection with DETR->ConditionalDetr
    def prepare_coco_detection(self, *args, **kwargs):
        logger.warning_once("The `prepare_coco_detection` method is deprecated and will be removed in v4.33.")
        return prepare_coco_detection_annotation(*args, **kwargs)

    # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.prepare_coco_panoptic
    def prepare_coco_panoptic(self, *args, **kwargs):
        logger.warning_once("The `prepare_coco_panoptic` method is deprecated and will be removed in v4.33.")
        return prepare_coco_panoptic_annotation(*args, **kwargs)

    # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.resize
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[ChannelDimension] = None,
        input_data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """
        Resize the image to the given size. Size can be `min_size` (scalar) or `(height, width)` tuple. If size is an
        int, the smaller edge of the image will be matched to this number.

        Args:
            image (`np.ndarray`):
                Image to resize.
            size (`Dict[str, int]`):
                Dictionary containing the size to resize to. Can contain the keys `shortest_edge` and `longest_edge`
                or `height` and `width`.
            resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):
                Resampling filter to use if resizing the image.
            data_format (`str` or `ChannelDimension`, *optional*):
                The channel dimension format for the output image. If unset, the channel dimension format of the input
                image is used.
            input_data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format of the input image. If not provided, it will be inferred.
        """
        if "max_size" in kwargs:
            logger.warning_once(
                "The `max_size` parameter is deprecated and will be removed in v4.26. "
                "Please specify in `size['longest_edge']` instead.",
            )
            max_size = kwargs.pop("max_size")
        else:
            max_size = None
        size = get_size_dict(size, max_size=max_size, default_to_square=False)
        if "shortest_edge" in size and "longest_edge" in size:
            size = get_resize_output_image_size(
                image, size["shortest_edge"], size["longest_edge"], input_data_format=input_data_format
            )
        elif "height" in size and "width" in size:
            size = (size["height"], size["width"])
        else:
            raise ValueError(
                "Size must contain 'height' and 'width' keys or 'shortest_edge' and 'longest_edge' keys. Got"
                f" {size.keys()}."
            )
        image = resize(
            image, size=size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs
        )
        return image

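    # Editor's note: a worked example of the `shortest_edge`/`longest_edge` rule above
    # (illustrative, not part of the class). For a 480x640 input with the default
    # {"shortest_edge": 800, "longest_edge": 1333}, matching the shortest edge gives a scale
    # of 800/480 ~ 1.67, and 640 * 1.67 ~ 1067 <= 1333, so the output is (800, 1067). For a
    # 480x1500 input the same scale would push the longest edge to 2500 > 1333, so the scale
    # 1333/1500 is used instead and the output is (427, 1333).
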
    # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.resize_annotation
    def resize_annotation(
        self,
        annotation,
        orig_size,
        size,
        resample: PILImageResampling = PILImageResampling.NEAREST,
    ) -> Dict:
        """
        Resize the annotation to match the resized image. If size is an int, the smaller edge of the mask will be
        matched to this number.
        """
        return resize_annotation(annotation, orig_size=orig_size, target_size=size, resample=resample)

    # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.rescale
    def rescale(
        self,
        image: np.ndarray,
        rescale_factor: float,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        input_data_format: Optional[Union[str, ChannelDimension]] = None,
    ) -> np.ndarray:
        """
        Rescale the image by the given factor: `image = image * rescale_factor`.

        Args:
            image (`np.ndarray`):
                Image to rescale.
            rescale_factor (`float`):
                The value to use for rescaling.
            data_format (`str` or `ChannelDimension`, *optional*):
                The channel dimension format for the output image. If unset, the channel dimension format of the input
                image is used. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
            input_data_format (`str` or `ChannelDimension`, *optional*):
                The channel dimension format for the input image. If unset, it is inferred from the input image. Can
                be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
        """
        return rescale(image, rescale_factor, data_format=data_format, input_data_format=input_data_format)

    # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.normalize_annotation
    def normalize_annotation(self, annotation: Dict, image_size: Tuple[int, int]) -> Dict:
        """
        Normalize the boxes in the annotation from `[top_left_x, top_left_y, bottom_right_x, bottom_right_y]` to
        `[center_x, center_y, width, height]` format and from absolute to relative pixel values.
        """
        return normalize_annotation(annotation, image_size=image_size)

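    # Editor's note: a worked example of the conversion above (illustrative, not part of the
    # class). On a 100x200 (height x width) image, the corner box [20, 10, 60, 50] becomes
    # center/size [40, 30, 40, 40] and, after dividing by (width, height, width, height),
    # [0.2, 0.3, 0.2, 0.4] in relative coordinates.
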
    # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor._update_annotation_for_padded_image
    def _update_annotation_for_padded_image(
        self,
        annotation: Dict,
        input_image_size: Tuple[int, int],
        output_image_size: Tuple[int, int],
        padding,
        update_bboxes,
    ) -> Dict:
        """
        Update the annotation for a padded image.
        """
        new_annotation = {}
        new_annotation["size"] = output_image_size

        for key, value in annotation.items():
            if key == "masks":
                masks = value
                masks = pad(
                    masks,
                    padding,
                    mode=PaddingMode.CONSTANT,
                    constant_values=0,
                    input_data_format=ChannelDimension.FIRST,
                )
                masks = safe_squeeze(masks, 1)
                new_annotation["masks"] = masks
            elif key == "boxes" and update_bboxes:
                boxes = value
                boxes *= np.asarray(
                    [
                        input_image_size[1] / output_image_size[1],
                        input_image_size[0] / output_image_size[0],
                        input_image_size[1] / output_image_size[1],
                        input_image_size[0] / output_image_size[0],
                    ]
                )
                new_annotation["boxes"] = boxes
            elif key == "size":
                new_annotation["size"] = output_image_size
            else:
                new_annotation[key] = value
        return new_annotation

    # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor._pad_image
    def _pad_image(
        self,
        image: np.ndarray,
        output_size: Tuple[int, int],
        annotation: Optional[Dict[str, Any]] = None,
        constant_values: Union[float, Iterable[float]] = 0,
        data_format: Optional[ChannelDimension] = None,
        input_data_format: Optional[Union[str, ChannelDimension]] = None,
        update_bboxes: bool = True,
    ) -> Tuple[np.ndarray, Optional[Dict[str, Any]]]:
        """
        Pad an image with zeros to the given size.
        """
        input_height, input_width = get_image_size(image, channel_dim=input_data_format)
        output_height, output_width = output_size

        pad_bottom = output_height - input_height
        pad_right = output_width - input_width
        padding = ((0, pad_bottom), (0, pad_right))
        padded_image = pad(
            image,
            padding,
            mode=PaddingMode.CONSTANT,
            constant_values=constant_values,
            data_format=data_format,
            input_data_format=input_data_format,
        )
        if annotation is not None:
            annotation = self._update_annotation_for_padded_image(
                annotation, (input_height, input_width), (output_height, output_width), padding, update_bboxes
            )
        return padded_image, annotation

    # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.pad
    def pad(
        self,
        images: List[np.ndarray],
        annotations: Optional[Union[AnnotationType, List[AnnotationType]]] = None,
        constant_values: Union[float, Iterable[float]] = 0,
        return_pixel_mask: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = None,
        input_data_format: Optional[Union[str, ChannelDimension]] = None,
        update_bboxes: bool = True,
    ) -> BatchFeature:
        """
        Pads a batch of images to the bottom and right of the image with zeros to the size of largest height and width
        in the batch and optionally returns their corresponding pixel mask.

        Args:
            images (List[`np.ndarray`]):
                Images to pad.
            annotations (`AnnotationType` or `List[AnnotationType]`, *optional*):
                Annotations to transform according to the padding that is applied to the images.
            constant_values (`float` or `Iterable[float]`, *optional*):
                The value to use for the padding if `mode` is `"constant"`.
            return_pixel_mask (`bool`, *optional*, defaults to `True`):
                Whether to return a pixel mask.
            return_tensors (`str` or `TensorType`, *optional*):
                The type of tensors to return. Can be one of:
                    - Unset: Return a list of `np.ndarray`.
                    - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
                    - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
                    - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
                    - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
            data_format (`str` or `ChannelDimension`, *optional*):
                The channel dimension format of the image. If not provided, it will be the same as the input image.
            input_data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format of the input image. If not provided, it will be inferred.
            update_bboxes (`bool`, *optional*, defaults to `True`):
                Whether to update the bounding boxes in the annotations to match the padded images. If the bounding
                boxes have not been converted to relative coordinates and `(center_x, center_y, width, height)`
                format, the bounding boxes will not be updated.
        """
        pad_size = get_max_height_width(images, input_data_format=input_data_format)

        annotation_list = annotations if annotations is not None else [None] * len(images)
        padded_images = []
        padded_annotations = []
        for image, annotation in zip(images, annotation_list):
            padded_image, padded_annotation = self._pad_image(
                image,
                pad_size,
                annotation,
                constant_values=constant_values,
                data_format=data_format,
                input_data_format=input_data_format,
                update_bboxes=update_bboxes,
            )
            padded_images.append(padded_image)
            padded_annotations.append(padded_annotation)

        data = {"pixel_values": padded_images}

        if return_pixel_mask:
            masks = [
                make_pixel_mask(image=image, output_size=pad_size, input_data_format=input_data_format)
                for image in images
            ]
            data["pixel_mask"] = masks

        encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)

        if annotations is not None:
            encoded_inputs["labels"] = [
                BatchFeature(annotation, tensor_type=return_tensors) for annotation in padded_annotations
            ]

        return encoded_inputs

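    # Editor's note: an illustrative padding sketch (not part of the class). Two channels-first
    # images of sizes (3, 480, 640) and (3, 512, 512) are both padded to (3, 512, 640), and
    # `pixel_mask` marks the valid (non-padded) pixels with ones:
    #
    #     images = [np.zeros((3, 480, 640)), np.zeros((3, 512, 512))]
    #     batch = image_processor.pad(images, return_tensors="np")
    #     # batch["pixel_values"].shape == (2, 3, 512, 640)
    #     # batch["pixel_mask"].shape == (2, 512, 640)
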
    # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.preprocess
    def preprocess(
        self,
        images: ImageInput,
        annotations: Optional[Union[AnnotationType, List[AnnotationType]]] = None,
        return_segmentation_masks: bool = None,
        masks_path: Optional[Union[str, pathlib.Path]] = None,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample=None,  # PILImageResampling
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[Union[int, float]] = None,
        do_normalize: Optional[bool] = None,
        do_convert_annotations: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_pad: Optional[bool] = None,
        format: Optional[Union[str, AnnotationFormat]] = None,
        return_tensors: Optional[Union[TensorType, str]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        input_data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> BatchFeature:
        """
        Preprocess an image or a batch of images so that it can be used by the model.

        Args:
            images (`ImageInput`):
                Image or batch of images to preprocess. Expects a single or batch of images with pixel values ranging
                from 0 to 255. If passing in images with pixel values between 0 and 1, set `do_rescale=False`.
            annotations (`AnnotationType` or `List[AnnotationType]`, *optional*):
                List of annotations associated with the image or batch of images. If the annotation is for object
                detection, the annotations should be a dictionary with the following keys:
                - "image_id" (`int`): The image id.
                - "annotations" (`List[Dict]`): List of annotations for an image. Each annotation should be a
                  dictionary. An image can have no annotations, in which case the list should be empty.
                If the annotation is for segmentation, the annotations should be a dictionary with the following keys:
                - "image_id" (`int`): The image id.
                - "segments_info" (`List[Dict]`): List of segments for an image. Each segment should be a dictionary.
                  An image can have no segments, in which case the list should be empty.
                - "file_name" (`str`): The file name of the image.
            return_segmentation_masks (`bool`, *optional*, defaults to self.return_segmentation_masks):
                Whether to return segmentation masks.
            masks_path (`str` or `pathlib.Path`, *optional*):
                Path to the directory containing the segmentation masks.
            do_resize (`bool`, *optional*, defaults to self.do_resize):
                Whether to resize the image.
            size (`Dict[str, int]`, *optional*, defaults to self.size):
                Size of the image after resizing.
            resample (`PILImageResampling`, *optional*, defaults to self.resample):
                Resampling filter to use when resizing the image.
            do_rescale (`bool`, *optional*, defaults to self.do_rescale):
                Whether to rescale the image.
            rescale_factor (`float`, *optional*, defaults to self.rescale_factor):
                Rescale factor to use when rescaling the image.
            do_normalize (`bool`, *optional*, defaults to self.do_normalize):
                Whether to normalize the image.
            do_convert_annotations (`bool`, *optional*, defaults to self.do_convert_annotations):
                Whether to convert the annotations to the format expected by the model. Converts the bounding boxes
                from the format `(top_left_x, top_left_y, width, height)` to `(center_x, center_y, width, height)`
                and in relative coordinates.
            image_mean (`float` or `List[float]`, *optional*, defaults to self.image_mean):
                Mean to use when normalizing the image.
            image_std (`float` or `List[float]`, *optional*, defaults to self.image_std):
                Standard deviation to use when normalizing the image.
            do_pad (`bool`, *optional*, defaults to self.do_pad):
                Whether to pad the image. If `True`, will pad the images in the batch to the largest image in the
                batch and create a pixel mask. Padding will be applied to the bottom and right of the image with
                zeros.
            format (`str` or `AnnotationFormat`, *optional*, defaults to self.format):
                Format of the annotations.
            return_tensors (`str` or `TensorType`, *optional*, defaults to self.return_tensors):
                Type of tensors to return. If `None`, will return the list of images.
            data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
                The channel dimension format for the output image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - Unset: Use the channel dimension format of the input image.
            input_data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format for the input image. If unset, the channel dimension format is inferred
                from the input image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
        """
        if "pad_and_return_pixel_mask" in kwargs:
            logger.warning_once(
                "The `pad_and_return_pixel_mask` argument is deprecated and will be removed in a future version, "
                "use `do_pad` instead."
            )
            do_pad = kwargs.pop("pad_and_return_pixel_mask")

        max_size = None
        if "max_size" in kwargs:
            logger.warning_once(
                "The `max_size` argument is deprecated and will be removed in a future version, use"
                " `size['longest_edge']` instead."
            )
            size = kwargs.pop("max_size")

        do_resize = self.do_resize if do_resize is None else do_resize
        size = self.size if size is None else size
        size = get_size_dict(size=size, max_size=max_size, default_to_square=False)
        resample = self.resample if resample is None else resample
        do_rescale = self.do_rescale if do_rescale is None else do_rescale
        rescale_factor = self.rescale_factor if rescale_factor is None else rescale_factor
        do_normalize = self.do_normalize if do_normalize is None else do_normalize
        image_mean = self.image_mean if image_mean is None else image_mean
        image_std = self.image_std if image_std is None else image_std
        do_convert_annotations = (
            self.do_convert_annotations if do_convert_annotations is None else do_convert_annotations
        )
        do_pad = self.do_pad if do_pad is None else do_pad
        format = self.format if format is None else format

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        validate_kwargs(captured_kwargs=kwargs.keys(), valid_processor_keys=self._valid_processor_keys)

        # Here, the pad() method pads to the maximum of (width, height). It does not need to be validated.
        validate_preprocess_arguments(
            do_rescale=do_rescale,
            rescale_factor=rescale_factor,
            do_normalize=do_normalize,
            image_mean=image_mean,
            image_std=image_std,
            do_resize=do_resize,
            size=size,
            resample=resample,
        )

        if annotations is not None and isinstance(annotations, dict):
            annotations = [annotations]

        if annotations is not None and len(images) != len(annotations):
            raise ValueError(
                f"The number of images ({len(images)}) and annotations ({len(annotations)}) do not match."
            )

        format = AnnotationFormat(format)
        if annotations is not None:
            validate_annotations(format, SUPPORTED_ANNOTATION_FORMATS, annotations)

        if (
            masks_path is not None
            and format == AnnotationFormat.COCO_PANOPTIC
            and not isinstance(masks_path, (pathlib.Path, str))
        ):
            raise ValueError(
                "The path to the directory containing the mask PNG files should be provided as a"
                f" `pathlib.Path` or string object, but is {type(masks_path)} instead."
            )

        # All transformations expect numpy arrays
        images = [to_numpy_array(image) for image in images]

        if is_scaled_image(images[0]) and do_rescale:
            logger.warning_once(
                "It looks like you are trying to rescale already rescaled images. If the input"
                " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
            )

        if input_data_format is None:
            # We assume that all images have the same channel dimension format.
            input_data_format = infer_channel_dimension_format(images[0])

        # prepare (COCO annotations as a list of Dict -> DETR target as a single Dict per image)
        if annotations is not None:
            prepared_images = []
            prepared_annotations = []
            for image, target in zip(images, annotations):
                target = self.prepare_annotation(
                    image,
                    target,
                    format,
                    return_segmentation_masks=return_segmentation_masks,
                    masks_path=masks_path,
                    input_data_format=input_data_format,
                )
                prepared_images.append(image)
                prepared_annotations.append(target)
            images = prepared_images
            annotations = prepared_annotations
            del prepared_images, prepared_annotations

        # transformations
        if do_resize:
            if annotations is not None:
                resized_images, resized_annotations = [], []
                for image, target in zip(images, annotations):
                    orig_size = get_image_size(image, input_data_format)
                    resized_image = self.resize(
                        image, size=size, max_size=max_size, resample=resample, input_data_format=input_data_format
                    )
                    resized_annotation = self.resize_annotation(
                        target, orig_size, get_image_size(resized_image, input_data_format)
                    )
                    resized_images.append(resized_image)
                    resized_annotations.append(resized_annotation)
                images = resized_images
                annotations = resized_annotations
                del resized_images, resized_annotations
            else:
                images = [
                    self.resize(image, size=size, resample=resample, input_data_format=input_data_format)
                    for image in images
                ]

        if do_rescale:
            images = [self.rescale(image, rescale_factor, input_data_format=input_data_format) for image in images]

        if do_normalize:
            images = [
                self.normalize(image, image_mean, image_std, input_data_format=input_data_format) for image in images
            ]

        if do_convert_annotations and annotations is not None:
            annotations = [
                self.normalize_annotation(annotation, get_image_size(image, input_data_format))
                for annotation, image in zip(annotations, images)
            ]

        if do_pad:
            # Pads images and returns their mask: {'pixel_values': ..., 'pixel_mask': ...}
            encoded_inputs = self.pad(
                images,
                annotations=annotations,
                return_pixel_mask=True,
                data_format=data_format,
                input_data_format=input_data_format,
                update_bboxes=do_convert_annotations,
                return_tensors=return_tensors,
            )
        else:
            images = [
                to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)
                for image in images
            ]
            encoded_inputs = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)
            if annotations is not None:
                encoded_inputs["labels"] = [
                    BatchFeature(annotation, tensor_type=return_tensors) for annotation in annotations
                ]

        return encoded_inputs

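    # Editor's note: an end-to-end usage sketch (illustrative, not part of the class). The
    # file name is a placeholder:
    #
    #     from PIL import Image
    #     image = Image.open("example.jpg")
    #     processor = ConditionalDetrImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50")
    #     inputs = processor(images=image, return_tensors="pt")
    #     # inputs["pixel_values"]: float tensor of shape (1, 3, H, W); inputs["pixel_mask"]: (1, H, W)
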
    # POSTPROCESSING METHODS - TODO: add support for other frameworks
    def post_process(self, outputs, target_sizes):
        """
        Converts the output of [`ConditionalDetrForObjectDetection`] into the Pascal VOC format (xmin, ymin, xmax,
        ymax). Only supports PyTorch.

        Args:
            outputs ([`ConditionalDetrObjectDetectionOutput`]):
                Raw outputs of the model.
            target_sizes (`torch.Tensor` of shape `(batch_size, 2)`):
                Tensor containing the size (h, w) of each image of the batch. For evaluation, this must be the original
                image size (before any data augmentation). For visualization, this should be the image size after data
                augmentation, but before padding.
        Returns:
            `List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels and boxes for an image
            in the batch as predicted by the model.
        """
        logger.warning_once(
            "`post_process` is deprecated and will be removed in v5 of Transformers, please use"
            " `post_process_object_detection` instead, with `threshold=0.` for equivalent results.",
        )

        out_logits, out_bbox = outputs.logits, outputs.pred_boxes

        if len(out_logits) != len(target_sizes):
            raise ValueError("Make sure that you pass in as many target sizes as the batch dimension of the logits")
        if target_sizes.shape[1] != 2:
            raise ValueError("Each element of target_sizes must contain the size (h, w) of each image of the batch")

        prob = out_logits.sigmoid()
        topk_values, topk_indexes = torch.topk(prob.view(out_logits.shape[0], -1), 300, dim=1)
        scores = topk_values
        topk_boxes = torch.div(topk_indexes, out_logits.shape[2], rounding_mode="floor")
        labels = topk_indexes % out_logits.shape[2]
        boxes = center_to_corners_format(out_bbox)
        boxes = torch.gather(boxes, 1, topk_boxes.unsqueeze(-1).repeat(1, 1, 4))

        # and from relative [0, 1] to absolute [0, height] coordinates
        img_h, img_w = target_sizes.unbind(1)
        scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1)
        boxes = boxes * scale_fct[:, None, :]

        results = [{"scores": s, "labels": l, "boxes": b} for s, l, b in zip(scores, labels, boxes)]

        return results

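    # Editor's note: the flattened top-k trick above, spelled out (illustrative). `prob` is
    # flattened to shape (batch, num_queries * num_classes), so a top-k index `i` encodes both
    # the query and the class of a detection:
    #
    #     query_index = i // num_classes   # which query slot produced the detection
    #     class_index = i % num_classes    # which class that slot is scored for
    #
    # which is exactly what the `torch.div(..., rounding_mode="floor")` and `%` lines recover.
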
    # Copied from transformers.models.deformable_detr.image_processing_deformable_detr.DeformableDetrImageProcessor.post_process_object_detection with DeformableDetr->ConditionalDetr
    def post_process_object_detection(
        self, outputs, threshold: float = 0.5, target_sizes: Union[TensorType, List[Tuple]] = None, top_k: int = 100
    ):
        """
        Converts the raw output of [`ConditionalDetrForObjectDetection`] into final bounding boxes in (top_left_x,
        top_left_y, bottom_right_x, bottom_right_y) format. Only supports PyTorch.

        Args:
            outputs ([`ConditionalDetrObjectDetectionOutput`]):
                Raw outputs of the model.
            threshold (`float`, *optional*, defaults to 0.5):
                Score threshold to keep object detection predictions.
            target_sizes (`torch.Tensor` or `List[Tuple[int, int]]`, *optional*):
                Tensor of shape `(batch_size, 2)` or list of tuples (`Tuple[int, int]`) containing the target size
                (height, width) of each image in the batch. If left to None, predictions will not be resized.
            top_k (`int`, *optional*, defaults to 100):
                Keep only the top k bounding boxes before filtering by thresholding.

        Returns:
            `List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels and boxes for an image
            in the batch as predicted by the model.
        """
        out_logits, out_bbox = outputs.logits, outputs.pred_boxes

        if target_sizes is not None:
            if len(out_logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

        prob = out_logits.sigmoid()
        prob = prob.view(out_logits.shape[0], -1)
        k_value = min(top_k, prob.size(1))
        topk_values, topk_indexes = torch.topk(prob, k_value, dim=1)
        scores = topk_values
        topk_boxes = torch.div(topk_indexes, out_logits.shape[2], rounding_mode="floor")
        labels = topk_indexes % out_logits.shape[2]
        boxes = center_to_corners_format(out_bbox)
        boxes = torch.gather(boxes, 1, topk_boxes.unsqueeze(-1).repeat(1, 1, 4))

        # and from relative [0, 1] to absolute [0, height] coordinates
        if target_sizes is not None:
            if isinstance(target_sizes, List):
                img_h = torch.Tensor([i[0] for i in target_sizes])
                img_w = torch.Tensor([i[1] for i in target_sizes])
            else:
                img_h, img_w = target_sizes.unbind(1)
            scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1).to(boxes.device)
            boxes = boxes * scale_fct[:, None, :]

        results = []
        for s, l, b in zip(scores, labels, boxes):
            score = s[s > threshold]
            label = l[s > threshold]
            box = b[s > threshold]
            results.append({"scores": score, "labels": label, "boxes": box})

        return results

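    # Editor's note: an illustrative detection post-processing sketch (not part of the class).
    # `outputs` would come from a `ConditionalDetrForObjectDetection` forward pass:
    #
    #     target_sizes = torch.tensor([image.size[::-1]])  # (height, width) per image
    #     results = processor.post_process_object_detection(outputs, threshold=0.5, target_sizes=target_sizes)
    #     for score, label, box in zip(results[0]["scores"], results[0]["labels"], results[0]["boxes"]):
    #         print(f"{label.item()}: {score.item():.2f} at {box.tolist()}")
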
    # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.post_process_semantic_segmentation with Detr->ConditionalDetr
    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple[int, int]] = None):
        """
        Converts the output of [`ConditionalDetrForSegmentation`] into semantic segmentation maps. Only supports
        PyTorch.

        Args:
            outputs ([`ConditionalDetrForSegmentation`]):
                Raw outputs of the model.
            target_sizes (`List[Tuple[int, int]]`, *optional*):
                A list of tuples (`Tuple[int, int]`) containing the target size (height, width) of each image in the
                batch. If unset, predictions will not be resized.
        Returns:
            `List[torch.Tensor]`:
                A list of length `batch_size`, where each item is a semantic segmentation map of shape (height, width)
                corresponding to the target_sizes entry (if `target_sizes` is specified). Each entry of each
                `torch.Tensor` corresponds to a semantic class id.
        """
        class_queries_logits = outputs.logits  # [batch_size, num_queries, num_classes+1]
        masks_queries_logits = outputs.pred_masks  # [batch_size, num_queries, height, width]

        # Remove the null class `[..., :-1]`
        masks_classes = class_queries_logits.softmax(dim=-1)[..., :-1]
        masks_probs = masks_queries_logits.sigmoid()  # [batch_size, num_queries, height, width]

        # Semantic segmentation logits of shape (batch_size, num_classes, height, width)
        segmentation = torch.einsum("bqc, bqhw -> bchw", masks_classes, masks_probs)
        batch_size = class_queries_logits.shape[0]

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if batch_size != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            semantic_segmentation = []
            for idx in range(batch_size):
                resized_logits = nn.functional.interpolate(
                    segmentation[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = segmentation.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation

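    # Editor's note: the einsum above, spelled out (illustrative). For each batch item b and
    # pixel (h, w), the semantic score of class c is the sum over queries q of the query's
    # class probability times its mask probability at that pixel:
    #
    #     segmentation[b, c, h, w] = sum_q masks_classes[b, q, c] * masks_probs[b, q, h, w]
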
    # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.post_process_instance_segmentation with Detr->ConditionalDetr
    def post_process_instance_segmentation(
        self,
        outputs,
        threshold: float = 0.5,
        mask_threshold: float = 0.5,
        overlap_mask_area_threshold: float = 0.8,
        target_sizes: Optional[List[Tuple[int, int]]] = None,
        return_coco_annotation: Optional[bool] = False,
    ) -> List[Dict]:
        """
        Converts the output of [`ConditionalDetrForSegmentation`] into instance segmentation predictions. Only
        supports PyTorch.

        Args:
            outputs ([`ConditionalDetrForSegmentation`]):
                Raw outputs of the model.
            threshold (`float`, *optional*, defaults to 0.5):
                The probability score threshold to keep predicted instance masks.
            mask_threshold (`float`, *optional*, defaults to 0.5):
                Threshold to use when turning the predicted masks into binary values.
            overlap_mask_area_threshold (`float`, *optional*, defaults to 0.8):
                The overlap mask area threshold to merge or discard small disconnected parts within each binary
                instance mask.
            target_sizes (`List[Tuple]`, *optional*):
                List of length `batch_size`, where each list item (`Tuple[int, int]`) corresponds to the requested
                final size (height, width) of each prediction. If unset, predictions will not be resized.
            return_coco_annotation (`bool`, *optional*, defaults to `False`):
                If set to `True`, segmentation maps are returned in COCO run-length encoding (RLE) format.
        Returns:
            `List[Dict]`: A list of dictionaries, one per image, each dictionary containing two keys:
            - **segmentation** -- A tensor of shape `(height, width)` where each pixel represents a `segment_id`, or
              the `List[List]` run-length encoding (RLE) of the segmentation map if `return_coco_annotation` is set
              to `True`. Set to `None` if no mask is found above `threshold`.
            - **segments_info** -- A list of dictionaries that contain additional information on each segment.
                - **id** -- An integer representing the `segment_id`.
                - **label_id** -- An integer representing the label / semantic class id corresponding to `segment_id`.
                - **score** -- Prediction score of segment with `segment_id`.
        """
        class_queries_logits = outputs.logits  # [batch_size, num_queries, num_classes+1]
        masks_queries_logits = outputs.pred_masks  # [batch_size, num_queries, height, width]

        batch_size = class_queries_logits.shape[0]
        num_labels = class_queries_logits.shape[-1] - 1

        mask_probs = masks_queries_logits.sigmoid()  # [batch_size, num_queries, height, width]

        # Predicted label and score of each query (batch_size, num_queries)
        pred_scores, pred_labels = nn.functional.softmax(class_queries_logits, dim=-1).max(-1)

        # Loop over items in batch
        results: List[Dict[str, TensorType]] = []

        for i in range(batch_size):
            mask_probs_item, pred_scores_item, pred_labels_item = remove_low_and_no_objects(
                mask_probs[i], pred_scores[i], pred_labels[i], threshold, num_labels
            )

            # No mask found
            if mask_probs_item.shape[0] <= 0:
                height, width = target_sizes[i] if target_sizes is not None else mask_probs_item.shape[1:]
                segmentation = torch.zeros((height, width)) - 1
                results.append({"segmentation": segmentation, "segments_info": []})
                continue

            # Get segmentation map and segment information of batch item
            target_size = target_sizes[i] if target_sizes is not None else None
            segmentation, segments = compute_segments(
                mask_probs=mask_probs_item,
                pred_scores=pred_scores_item,
                pred_labels=pred_labels_item,
                mask_threshold=mask_threshold,
                overlap_mask_area_threshold=overlap_mask_area_threshold,
                label_ids_to_fuse=[],
                target_size=target_size,
            )

            # Return segmentation map in run-length encoding (RLE) format
            if return_coco_annotation:
                segmentation = convert_segmentation_to_rle(segmentation)

            results.append({"segmentation": segmentation, "segments_info": segments})
        return results

    # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.post_process_panoptic_segmentation with Detr->ConditionalDetr
    def post_process_panoptic_segmentation(
        self,
        outputs,
        threshold: float = 0.5,
        mask_threshold: float = 0.5,
        overlap_mask_area_threshold: float = 0.8,
        label_ids_to_fuse: Optional[Set[int]] = None,
        target_sizes: Optional[List[Tuple[int, int]]] = None,
    ) -> List[Dict]:
        """
        Converts the output of [`ConditionalDetrForSegmentation`] into image panoptic segmentation predictions. Only
        supports PyTorch.

        Args:
            outputs ([`ConditionalDetrForSegmentation`]):
                The outputs from [`ConditionalDetrForSegmentation`].
            threshold (`float`, *optional*, defaults to 0.5):
                The probability score threshold to keep predicted instance masks.
            mask_threshold (`float`, *optional*, defaults to 0.5):
                Threshold to use when turning the predicted masks into binary values.
            overlap_mask_area_threshold (`float`, *optional*, defaults to 0.8):
                The overlap mask area threshold to merge or discard small disconnected parts within each binary
                instance mask.
            label_ids_to_fuse (`Set[int]`, *optional*):
                The labels in this set will have all their instances fused together. For instance, we could say
                there can only be one sky in an image, but several persons, so the label ID for sky would be in that
                set, but not the one for person.
            target_sizes (`List[Tuple]`, *optional*):
                List of length `batch_size`, where each list item (`Tuple[int, int]`) corresponds to the requested
                final size (height, width) of each prediction in the batch. If unset, predictions will not be
                resized.
        Returns:
            `List[Dict]`: A list of dictionaries, one per image, each dictionary containing two keys:
            - **segmentation** -- a tensor of shape `(height, width)` where each pixel represents a `segment_id`, or
              `None` if no mask is found above `threshold`. If `target_sizes` is specified, segmentation is resized
              to the corresponding `target_sizes` entry.
            - **segments_info** -- A list of dictionaries that contain additional information on each segment.
                - **id** -- an integer representing the `segment_id`.
                - **label_id** -- An integer representing the label / semantic class id corresponding to `segment_id`.
                - **was_fused** -- a boolean, `True` if `label_id` was in `label_ids_to_fuse`, `False` otherwise.
                  Multiple instances of the same class / label were fused and assigned a single `segment_id`.
                - **score** -- Prediction score of segment with `segment_id`.
        """

        if label_ids_to_fuse is None:
            logger.warning_once("`label_ids_to_fuse` unset. No instance will be fused.")
            label_ids_to_fuse = set()

        class_queries_logits = outputs.logits  # [batch_size, num_queries, num_classes+1]
        masks_queries_logits = outputs.pred_masks  # [batch_size, num_queries, height, width]

        batch_size = class_queries_logits.shape[0]
        num_labels = class_queries_logits.shape[-1] - 1

        mask_probs = masks_queries_logits.sigmoid()  # [batch_size, num_queries, height, width]

        # Predicted label and score of each query (batch_size, num_queries)
        pred_scores, pred_labels = nn.functional.softmax(class_queries_logits, dim=-1).max(-1)

        # Loop over items in batch
        results: List[Dict[str, TensorType]] = []

        for i in range(batch_size):
            mask_probs_item, pred_scores_item, pred_labels_item = remove_low_and_no_objects(
                mask_probs[i], pred_scores[i], pred_labels[i], threshold, num_labels
            )

            # No mask found
            if mask_probs_item.shape[0] <= 0:
                height, width = target_sizes[i] if target_sizes is not None else mask_probs_item.shape[1:]
                segmentation = torch.zeros((height, width)) - 1
                results.append({"segmentation": segmentation, "segments_info": []})
                continue

            # Get segmentation map and segment information of batch item
            target_size = target_sizes[i] if target_sizes is not None else None
            segmentation, segments = compute_segments(
                mask_probs=mask_probs_item,
                pred_scores=pred_scores_item,
                pred_labels=pred_labels_item,
                mask_threshold=mask_threshold,
                overlap_mask_area_threshold=overlap_mask_area_threshold,
                label_ids_to_fuse=label_ids_to_fuse,
                target_size=target_size,
            )

            results.append({"segmentation": segmentation, "segments_info": segments})
        return results
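
    # Editor's note: an illustrative panoptic post-processing sketch (not part of the class).
    # `outputs` would come from a `ConditionalDetrForSegmentation` forward pass:
    #
    #     results = processor.post_process_panoptic_segmentation(
    #         outputs, threshold=0.5, label_ids_to_fuse={0}, target_sizes=[(480, 640)]
    #     )
    #     panoptic_map = results[0]["segmentation"]     # (480, 640) map of segment ids
    #     segments_info = results[0]["segments_info"]   # per-segment id/label_id/was_fused/score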