applied-ai-018 committed on
Commit f83d987 · verified · 1 Parent(s): d9aa64c

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See raw diff
Files changed (50)
  1. .gitattributes +1 -0
  2. ckpts/universal/global_step20/zero/26.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt +3 -0
  3. lm-evaluation-harness/wandb/run-20240514_163424-gpqnyvgo/files/config.yaml +43 -0
  4. lm-evaluation-harness/wandb/run-20240514_163424-gpqnyvgo/files/output.log +28 -0
  5. lm-evaluation-harness/wandb/run-20240514_163424-gpqnyvgo/files/requirements.txt +163 -0
  6. lm-evaluation-harness/wandb/run-20240514_163424-gpqnyvgo/files/wandb-metadata.json +810 -0
  7. lm-evaluation-harness/wandb/run-20240514_163424-gpqnyvgo/files/wandb-summary.json +1 -0
  8. lm-evaluation-harness/wandb/run-20240514_163424-gpqnyvgo/logs/debug-internal.log +194 -0
  9. lm-evaluation-harness/wandb/run-20240514_163424-gpqnyvgo/run-gpqnyvgo.wandb +0 -0
  10. lm-evaluation-harness/wandb/run-20240514_163606-als7uk9d/logs/debug-internal.log +180 -0
  11. lm-evaluation-harness/wandb/run-20240523_080306-xred40sp/files/config.yaml +43 -0
  12. lm-evaluation-harness/wandb/run-20240523_080306-xred40sp/files/output.log +34 -0
  13. lm-evaluation-harness/wandb/run-20240523_080306-xred40sp/files/requirements.txt +155 -0
  14. lm-evaluation-harness/wandb/run-20240523_080306-xred40sp/files/wandb-metadata.json +850 -0
  15. lm-evaluation-harness/wandb/run-20240523_080306-xred40sp/files/wandb-summary.json +1 -0
  16. lm-evaluation-harness/wandb/run-20240523_080306-xred40sp/logs/debug-internal.log +185 -0
  17. lm-evaluation-harness/wandb/run-20240523_080306-xred40sp/logs/debug.log +29 -0
  18. lm-evaluation-harness/wandb/run-20240523_080306-xred40sp/run-xred40sp.wandb +0 -0
  19. lm-evaluation-harness/wandb/run-20240523_123859-6ht0x2b2/files/config.yaml +43 -0
  20. lm-evaluation-harness/wandb/run-20240523_123859-6ht0x2b2/files/output.log +34 -0
  21. lm-evaluation-harness/wandb/run-20240523_123859-6ht0x2b2/files/requirements.txt +155 -0
  22. lm-evaluation-harness/wandb/run-20240523_123859-6ht0x2b2/files/wandb-metadata.json +850 -0
  23. lm-evaluation-harness/wandb/run-20240523_123859-6ht0x2b2/files/wandb-summary.json +1 -0
  24. lm-evaluation-harness/wandb/run-20240523_123859-6ht0x2b2/logs/debug-internal.log +183 -0
  25. lm-evaluation-harness/wandb/run-20240523_123859-6ht0x2b2/logs/debug.log +29 -0
  26. lm-evaluation-harness/wandb/run-20240523_123859-6ht0x2b2/run-6ht0x2b2.wandb +0 -0
  27. lm-evaluation-harness/wandb/run-20240608_122925-vmbmpokf/run-vmbmpokf.wandb +3 -0
  28. venv/lib/python3.10/site-packages/portalocker/__about__.py +6 -0
  29. venv/lib/python3.10/site-packages/portalocker/__init__.py +76 -0
  30. venv/lib/python3.10/site-packages/portalocker/__main__.py +98 -0
  31. venv/lib/python3.10/site-packages/portalocker/__pycache__/__about__.cpython-310.pyc +0 -0
  32. venv/lib/python3.10/site-packages/portalocker/__pycache__/__init__.cpython-310.pyc +0 -0
  33. venv/lib/python3.10/site-packages/portalocker/__pycache__/__main__.cpython-310.pyc +0 -0
  34. venv/lib/python3.10/site-packages/portalocker/__pycache__/constants.cpython-310.pyc +0 -0
  35. venv/lib/python3.10/site-packages/portalocker/__pycache__/exceptions.cpython-310.pyc +0 -0
  36. venv/lib/python3.10/site-packages/portalocker/__pycache__/portalocker.cpython-310.pyc +0 -0
  37. venv/lib/python3.10/site-packages/portalocker/__pycache__/redis.cpython-310.pyc +0 -0
  38. venv/lib/python3.10/site-packages/portalocker/__pycache__/utils.cpython-310.pyc +0 -0
  39. venv/lib/python3.10/site-packages/portalocker/constants.py +58 -0
  40. venv/lib/python3.10/site-packages/portalocker/exceptions.py +27 -0
  41. venv/lib/python3.10/site-packages/portalocker/portalocker.py +117 -0
  42. venv/lib/python3.10/site-packages/portalocker/py.typed +0 -0
  43. venv/lib/python3.10/site-packages/portalocker/redis.py +236 -0
  44. venv/lib/python3.10/site-packages/portalocker/utils.py +563 -0
  45. venv/lib/python3.10/site-packages/sacrebleu-2.4.2.dist-info/INSTALLER +1 -0
  46. venv/lib/python3.10/site-packages/sacrebleu-2.4.2.dist-info/entry_points.txt +2 -0
  47. venv/lib/python3.10/site-packages/yaml/__init__.py +390 -0
  48. venv/lib/python3.10/site-packages/yaml/__pycache__/__init__.cpython-310.pyc +0 -0
  49. venv/lib/python3.10/site-packages/yaml/__pycache__/composer.cpython-310.pyc +0 -0
  50. venv/lib/python3.10/site-packages/yaml/__pycache__/constructor.cpython-310.pyc +0 -0
.gitattributes CHANGED
@@ -122,3 +122,4 @@ lm-evaluation-harness/wandb/run-20240608_162527-0vroecl5/run-0vroecl5.wandb filt
  lm-evaluation-harness/wandb/run-20240608_150638-5wz6ydpy/run-5wz6ydpy.wandb filter=lfs diff=lfs merge=lfs -text
  lm-evaluation-harness/wandb/run-20240605_123856-sm8adh93/run-sm8adh93.wandb filter=lfs diff=lfs merge=lfs -text
  lm-evaluation-harness/wandb/run-20240605_062502-fvj55jrc/run-fvj55jrc.wandb filter=lfs diff=lfs merge=lfs -text
+ lm-evaluation-harness/wandb/run-20240608_122925-vmbmpokf/run-vmbmpokf.wandb filter=lfs diff=lfs merge=lfs -text
ckpts/universal/global_step20/zero/26.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:468e078a04dc404cda552ad8e961071c7814ac4bdead3e51a884eecff7adb727
+ size 33555612
lm-evaluation-harness/wandb/run-20240514_163424-gpqnyvgo/files/config.yaml ADDED
@@ -0,0 +1,43 @@
+ wandb_version: 1
+
+ _wandb:
+   desc: null
+   value:
+     python_version: 3.10.12
+     cli_version: 0.17.0
+     framework: huggingface
+     huggingface_version: 4.40.2
+     is_jupyter_run: false
+     is_kaggle_kernel: false
+     start_time: 1715704464
+     t:
+       1:
+       - 1
+       - 5
+       - 11
+       - 49
+       - 51
+       - 53
+       - 55
+       - 71
+       - 98
+       - 100
+       2:
+       - 1
+       - 5
+       - 11
+       - 49
+       - 51
+       - 53
+       - 55
+       - 71
+       - 98
+       - 100
+       3:
+       - 23
+       4: 3.10.12
+       5: 0.17.0
+       6: 4.40.2
+       8:
+       - 5
+       13: linux-x86_64
lm-evaluation-harness/wandb/run-20240514_163424-gpqnyvgo/files/output.log ADDED
@@ -0,0 +1,28 @@
+
+ 2024-05-14:16:34:25,093 INFO [__main__.py:251] Verbosity set to INFO
+ 2024-05-14:16:34:30,618 INFO [__main__.py:335] Selected Tasks: ['indiccopa-hi']
+ 2024-05-14:16:34:30,622 INFO [evaluator.py:131] Setting random seed to 0 | Setting numpy seed to 1234 | Setting torch manual seed to 1234
+ 2024-05-14:16:34:30,622 INFO [evaluator.py:177] Initializing hf model, with arguments: {'pretrained': '/data/cronscript/ckpts//hf_ckpt//global_step100'}
+ /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/gpu_migration/core/register.py:145: UserWarning: "hpu:X" notation is not supported by Gaudi PyTorch intergration bridge. Please change to "hpu" without index (Triggered internally at /npu-stack/pytorch-integration/pytorch_helpers/lazy_to_backend.cpp:53.)
+ return func(*args, **kwargs)
+ /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/gpu_migration/torch/cuda/memory.py:36: UserWarning: No need to call empty_cache on HPU. It manages the memory internally in an effcient way.
+ warnings.warn(
+ [2024-05-14 16:34:41,218] [INFO] [real_accelerator.py:178:get_accelerator] Setting ds_accelerator to hpu (auto detect)
+ /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/hpu/__init__.py:158: UserWarning: torch.hpu.setDeterministic is deprecated and will be removed in next release. Please use torch.use_deterministic_algorithms instead.
+ warnings.warn(
+ You are using the default legacy behaviour of the <class 'transformers.models.llama.tokenization_llama.LlamaTokenizer'>. This is expected, and simply means that the `legacy` (previous) behavior will be used so nothing changes for you. If you want to use the new behaviour, set `legacy=False`. This should only be set if you understand what it means, and thoroughly read the reason why this was added as explained in https://github.com/huggingface/transformers/pull/24565
+ 2024-05-14:16:34:41,617 WARNING [task.py:763] [Task: indiccopa-hi] metric acc is defined, but aggregation is not. using default aggregation=mean
+ 2024-05-14:16:34:41,617 WARNING [task.py:775] [Task: indiccopa-hi] metric acc is defined, but higher_is_better is not. using default higher_is_better=True
+ /usr/local/lib/python3.10/dist-packages/datasets/load.py:1486: FutureWarning: The repository for ai4bharat/IndicCOPA contains custom code which must be executed to correctly load the dataset. You can inspect the repository content at https://hf.co/datasets/ai4bharat/IndicCOPA
+ You can avoid this message in future by passing the argument `trust_remote_code=True`.
+ Passing `trust_remote_code=True` will be mandatory to load this dataset from the next major release of `datasets`.
+ warnings.warn(
+ 2024-05-14:16:34:42,858 WARNING [task.py:322] [Task: indiccopa-hi] has_training_docs and has_validation_docs are False, using test_docs as fewshot_docs but this is not recommended.
+ 2024-05-14:16:34:42,858 WARNING [task.py:322] [Task: indiccopa-hi] has_training_docs and has_validation_docs are False, using test_docs as fewshot_docs but this is not recommended.
+ 2024-05-14:16:34:42,879 INFO [task.py:395] Building contexts for indiccopa-hi on rank 5...
+ 100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 56/56 [00:00<00:00, 96698.65it/s]
+ 2024-05-14:16:34:44,718 INFO [evaluator.py:379] Running loglikelihood requests
+ Passed argument batch_size = auto:1. Detecting largest batch size
+ Determined largest batch size: 64
+ /usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/gpu_migration/torch/cuda/memory.py:36: UserWarning: No need to call empty_cache on HPU. It manages the memory internally in an effcient way.
+ warnings.warn(
lm-evaluation-harness/wandb/run-20240514_163424-gpqnyvgo/files/requirements.txt ADDED
@@ -0,0 +1,163 @@
+ DataProperty==1.0.1
+ GitPython==3.1.43
+ Jinja2==3.1.3
+ Markdown==3.6
+ MarkupSafe==2.1.5
+ Pillow-SIMD==7.0.0.post3
+ PyYAML==6.0
+ Werkzeug==3.0.2
+ absl-py==2.1.0
+ accelerate==0.30.1
+ aiohttp==3.9.4
+ aiosignal==1.3.1
+ antlr4-python3-runtime==4.9.3
+ anyio==4.3.0
+ async-timeout==4.0.3
+ attrs==23.2.0
+ av==9.2.0
+ cachetools==5.3.3
+ certifi==2024.2.2
+ cffi==1.15.1
+ cfgv==3.4.0
+ chardet==5.2.0
+ charset-normalizer==3.3.2
+ click==8.1.7
+ cmake==3.29.2
+ colorama==0.4.6
+ datasets==2.19.1
+ deepspeed==0.12.4+hpu.synapse.v1.15.1
+ dill==0.3.8
+ distlib==0.3.8
+ distro==1.9.0
+ docker-pycreds==0.4.0
+ einops==0.8.0
+ evaluate==0.4.2
+ exceptiongroup==1.2.0
+ expecttest==0.2.1
+ filelock==3.13.4
+ frozenlist==1.4.1
+ fsspec==2024.3.1
+ gitdb==4.0.11
+ google-auth-oauthlib==0.4.6
+ google-auth==2.29.0
+ grpcio==1.62.1
+ h11==0.14.0
+ habana-media-loader==1.15.1.15
+ habana-pyhlml==1.15.1.15
+ habana-torch-dataloader==1.15.1.15
+ habana-torch-plugin==1.15.1.15
+ habana_gpu_migration==1.15.1.15
+ habana_quantization_toolkit==1.15.1.15
+ hjson==3.1.0
+ httpcore==1.0.5
+ httpx==0.27.0
+ huggingface-hub==0.23.0
+ identify==2.5.35
+ idna==3.7
+ importlib_resources==6.4.0
+ iniconfig==2.0.0
+ joblib==1.4.2
+ jsonlines==4.0.0
+ lightning-habana==1.4.0
+ lightning-utilities==0.11.2
+ lightning==2.2.0.post0
+ lm_eval==0.3.0
+ lm_eval==0.4.2
+ lm_eval==0.4.2
+ lm_eval==0.4.2
+ mbstrdecoder==1.1.3
+ more-itertools==10.2.0
+ mpi4py==3.1.4
+ mpmath==1.3.0
+ multidict==6.0.5
+ multiprocess==0.70.16
+ networkx==3.3
+ ninja==1.11.1.1
+ nltk==3.8.1
+ nodeenv==1.8.0
+ numexpr==2.10.0
+ numpy==1.23.5
+ oauthlib==3.2.2
+ omegaconf==2.3.0
+ openai==1.29.0
+ packaging==24.0
+ pandas==2.0.1
+ pathspec==0.12.1
+ pathvalidate==3.2.0
+ peft==0.10.0
+ perfetto==0.7.0
+ pip==22.0.2
+ pip==23.3.1
+ platformdirs==4.2.0
+ pluggy==1.4.0
+ portalocker==2.8.2
+ pre-commit==3.3.3
+ protobuf==3.20.3
+ psutil==5.9.8
+ py-cpuinfo==9.0.0
+ pyarrow-hotfix==0.6
+ pyarrow==16.0.0
+ pyasn1==0.6.0
+ pyasn1_modules==0.4.0
+ pybind11==2.10.4
+ pycountry==23.12.11
+ pycparser==2.22
+ pydantic==1.10.13
+ pynvml==8.0.4
+ pytablewriter==1.2.0
+ pytest==8.1.1
+ python-dateutil==2.9.0.post0
+ pytorch-lightning==2.2.2
+ pytz==2024.1
+ regex==2023.5.5
+ requests-oauthlib==2.0.0
+ requests==2.31.0
+ rouge_score==0.1.2
+ rsa==4.9
+ sacrebleu==1.5.0
+ safetensors==0.4.3
+ scikit-learn==1.4.2
+ scipy==1.13.0
+ sentencepiece==0.2.0
+ sentry-sdk==2.1.1
+ setproctitle==1.3.3
+ setuptools==59.6.0
+ setuptools==69.5.1
+ six==1.16.0
+ smmap==5.0.1
+ sniffio==1.3.1
+ sqlitedict==2.1.0
+ symengine==0.11.0
+ sympy==1.12
+ tabledata==1.3.3
+ tcolorpy==0.1.6
+ tdqm==0.0.1
+ tensorboard-data-server==0.6.1
+ tensorboard-plugin-wit==1.8.1
+ tensorboard==2.11.2
+ threadpoolctl==3.5.0
+ tokenizers==0.19.1
+ tomli==2.0.1
+ torch==2.2.0a0+git8964477
+ torch_tb_profiler==0.4.0
+ torchaudio==2.2.0+08901ad
+ torchdata==0.7.1+5e6f7b7
+ torchmetrics==1.3.2
+ torchtext==0.17.0+400da5c
+ torchvision==0.17.0+b2383d4
+ tqdm-multiprocess==0.0.11
+ tqdm==4.66.2
+ transformers==4.40.2
+ typepy==1.3.2
+ typing_extensions==4.11.0
+ tzdata==2024.1
+ urllib3==1.26.18
+ virtualenv==20.25.1
+ wandb==0.17.0
+ wheel==0.37.1
+ wheel==0.43.0
+ word2number==1.1
+ xxhash==3.4.1
+ yamllint==1.35.1
+ yarl==1.9.4
+ zstandard==0.22.0
lm-evaluation-harness/wandb/run-20240514_163424-gpqnyvgo/files/wandb-metadata.json ADDED
@@ -0,0 +1,810 @@
1
+ {
2
+ "os": "Linux-5.15.0-92-generic-x86_64-with-glibc2.35",
3
+ "python": "3.10.12",
4
+ "heartbeatAt": "2024-05-14T16:34:24.956403",
5
+ "startedAt": "2024-05-14T16:34:24.504649",
6
+ "docker": null,
7
+ "cuda": null,
8
+ "args": [
9
+ "--model",
10
+ "hf",
11
+ "--model_args",
12
+ "pretrained=/data/cronscript/ckpts//hf_ckpt//global_step100",
13
+ "--tasks",
14
+ "indiccopa-hi",
15
+ "--batch_size",
16
+ "auto",
17
+ "--wandb_args",
18
+ "project=bharatgpt,group=trial_expt"
19
+ ],
20
+ "state": "running",
21
+ "program": "-m lm_eval.__main__",
22
+ "codePathLocal": null,
23
+ "git": {
24
+ "remote": "https://github.com/EleutherAI/lm-evaluation-harness",
25
+ "commit": null
26
+ },
27
+ "email": null,
28
+ "root": "/data/cronscript/lm-evaluation-harness",
29
+ "host": "vizzhy-150-3",
30
+ "username": "root",
31
+ "executable": "/usr/bin/python3",
32
+ "cpu_count": 76,
33
+ "cpu_count_logical": 152,
34
+ "cpu_freq": {
35
+ "current": 3394.3326644736844,
36
+ "min": 800.0,
37
+ "max": 3400.0
38
+ },
39
+ "cpu_freq_per_core": [
40
+ {
41
+ "current": 3208.609,
42
+ "min": 800.0,
43
+ "max": 3400.0
44
+ },
45
+ {
46
+ "current": 3208.512,
47
+ "min": 800.0,
48
+ "max": 3400.0
49
+ },
50
+ {
51
+ "current": 3208.417,
52
+ "min": 800.0,
53
+ "max": 3400.0
54
+ },
55
+ {
56
+ "current": 3400.0,
57
+ "min": 800.0,
58
+ "max": 3400.0
59
+ },
60
+ {
61
+ "current": 3400.0,
62
+ "min": 800.0,
63
+ "max": 3400.0
64
+ },
65
+ {
66
+ "current": 3400.0,
67
+ "min": 800.0,
68
+ "max": 3400.0
69
+ },
70
+ {
71
+ "current": 3400.0,
72
+ "min": 800.0,
73
+ "max": 3400.0
74
+ },
75
+ {
76
+ "current": 3400.0,
77
+ "min": 800.0,
78
+ "max": 3400.0
79
+ },
80
+ {
81
+ "current": 3400.0,
82
+ "min": 800.0,
83
+ "max": 3400.0
84
+ },
85
+ {
86
+ "current": 3400.0,
87
+ "min": 800.0,
88
+ "max": 3400.0
89
+ },
90
+ {
91
+ "current": 3400.0,
92
+ "min": 800.0,
93
+ "max": 3400.0
94
+ },
95
+ {
96
+ "current": 3400.0,
97
+ "min": 800.0,
98
+ "max": 3400.0
99
+ },
100
+ {
101
+ "current": 3400.0,
102
+ "min": 800.0,
103
+ "max": 3400.0
104
+ },
105
+ {
106
+ "current": 3400.0,
107
+ "min": 800.0,
108
+ "max": 3400.0
109
+ },
110
+ {
111
+ "current": 3400.0,
112
+ "min": 800.0,
113
+ "max": 3400.0
114
+ },
115
+ {
116
+ "current": 3400.0,
117
+ "min": 800.0,
118
+ "max": 3400.0
119
+ },
120
+ {
121
+ "current": 3400.0,
122
+ "min": 800.0,
123
+ "max": 3400.0
124
+ },
125
+ {
126
+ "current": 3400.0,
127
+ "min": 800.0,
128
+ "max": 3400.0
129
+ },
130
+ {
131
+ "current": 3400.0,
132
+ "min": 800.0,
133
+ "max": 3400.0
134
+ },
135
+ {
136
+ "current": 3400.0,
137
+ "min": 800.0,
138
+ "max": 3400.0
139
+ },
140
+ {
141
+ "current": 3400.0,
142
+ "min": 800.0,
143
+ "max": 3400.0
144
+ },
145
+ {
146
+ "current": 3400.0,
147
+ "min": 800.0,
148
+ "max": 3400.0
149
+ },
150
+ {
151
+ "current": 3400.0,
152
+ "min": 800.0,
153
+ "max": 3400.0
154
+ },
155
+ {
156
+ "current": 3400.0,
157
+ "min": 800.0,
158
+ "max": 3400.0
159
+ },
160
+ {
161
+ "current": 3400.0,
162
+ "min": 800.0,
163
+ "max": 3400.0
164
+ },
165
+ {
166
+ "current": 3400.0,
167
+ "min": 800.0,
168
+ "max": 3400.0
169
+ },
170
+ {
171
+ "current": 3400.0,
172
+ "min": 800.0,
173
+ "max": 3400.0
174
+ },
175
+ {
176
+ "current": 3400.0,
177
+ "min": 800.0,
178
+ "max": 3400.0
179
+ },
180
+ {
181
+ "current": 3400.0,
182
+ "min": 800.0,
183
+ "max": 3400.0
184
+ },
185
+ {
186
+ "current": 3400.0,
187
+ "min": 800.0,
188
+ "max": 3400.0
189
+ },
190
+ {
191
+ "current": 3400.0,
192
+ "min": 800.0,
193
+ "max": 3400.0
194
+ },
195
+ {
196
+ "current": 3400.0,
197
+ "min": 800.0,
198
+ "max": 3400.0
199
+ },
200
+ {
201
+ "current": 3400.0,
202
+ "min": 800.0,
203
+ "max": 3400.0
204
+ },
205
+ {
206
+ "current": 3400.0,
207
+ "min": 800.0,
208
+ "max": 3400.0
209
+ },
210
+ {
211
+ "current": 3400.0,
212
+ "min": 800.0,
213
+ "max": 3400.0
214
+ },
215
+ {
216
+ "current": 3400.0,
217
+ "min": 800.0,
218
+ "max": 3400.0
219
+ },
220
+ {
221
+ "current": 3400.0,
222
+ "min": 800.0,
223
+ "max": 3400.0
224
+ },
225
+ {
226
+ "current": 3400.0,
227
+ "min": 800.0,
228
+ "max": 3400.0
229
+ },
230
+ {
231
+ "current": 3345.501,
232
+ "min": 800.0,
233
+ "max": 3400.0
234
+ },
235
+ {
236
+ "current": 3345.731,
237
+ "min": 800.0,
238
+ "max": 3400.0
239
+ },
240
+ {
241
+ "current": 3400.0,
242
+ "min": 800.0,
243
+ "max": 3400.0
244
+ },
245
+ {
246
+ "current": 3400.0,
247
+ "min": 800.0,
248
+ "max": 3400.0
249
+ },
250
+ {
251
+ "current": 3400.0,
252
+ "min": 800.0,
253
+ "max": 3400.0
254
+ },
255
+ {
256
+ "current": 3400.0,
257
+ "min": 800.0,
258
+ "max": 3400.0
259
+ },
260
+ {
261
+ "current": 3400.0,
262
+ "min": 800.0,
263
+ "max": 3400.0
264
+ },
265
+ {
266
+ "current": 3400.0,
267
+ "min": 800.0,
268
+ "max": 3400.0
269
+ },
270
+ {
271
+ "current": 3400.0,
272
+ "min": 800.0,
273
+ "max": 3400.0
274
+ },
275
+ {
276
+ "current": 3400.0,
277
+ "min": 800.0,
278
+ "max": 3400.0
279
+ },
280
+ {
281
+ "current": 3400.0,
282
+ "min": 800.0,
283
+ "max": 3400.0
284
+ },
285
+ {
286
+ "current": 3375.983,
287
+ "min": 800.0,
288
+ "max": 3400.0
289
+ },
290
+ {
291
+ "current": 3400.0,
292
+ "min": 800.0,
293
+ "max": 3400.0
294
+ },
295
+ {
296
+ "current": 3400.0,
297
+ "min": 800.0,
298
+ "max": 3400.0
299
+ },
300
+ {
301
+ "current": 3400.0,
302
+ "min": 800.0,
303
+ "max": 3400.0
304
+ },
305
+ {
306
+ "current": 3400.0,
307
+ "min": 800.0,
308
+ "max": 3400.0
309
+ },
310
+ {
311
+ "current": 3400.0,
312
+ "min": 800.0,
313
+ "max": 3400.0
314
+ },
315
+ {
316
+ "current": 3300.0,
317
+ "min": 800.0,
318
+ "max": 3400.0
319
+ },
320
+ {
321
+ "current": 3400.0,
322
+ "min": 800.0,
323
+ "max": 3400.0
324
+ },
325
+ {
326
+ "current": 3304.616,
327
+ "min": 800.0,
328
+ "max": 3400.0
329
+ },
330
+ {
331
+ "current": 3400.0,
332
+ "min": 800.0,
333
+ "max": 3400.0
334
+ },
335
+ {
336
+ "current": 3400.0,
337
+ "min": 800.0,
338
+ "max": 3400.0
339
+ },
340
+ {
341
+ "current": 3400.0,
342
+ "min": 800.0,
343
+ "max": 3400.0
344
+ },
345
+ {
346
+ "current": 3400.0,
347
+ "min": 800.0,
348
+ "max": 3400.0
349
+ },
350
+ {
351
+ "current": 3400.0,
352
+ "min": 800.0,
353
+ "max": 3400.0
354
+ },
355
+ {
356
+ "current": 3400.0,
357
+ "min": 800.0,
358
+ "max": 3400.0
359
+ },
360
+ {
361
+ "current": 3400.0,
362
+ "min": 800.0,
363
+ "max": 3400.0
364
+ },
365
+ {
366
+ "current": 3400.0,
367
+ "min": 800.0,
368
+ "max": 3400.0
369
+ },
370
+ {
371
+ "current": 3400.0,
372
+ "min": 800.0,
373
+ "max": 3400.0
374
+ },
375
+ {
376
+ "current": 3400.0,
377
+ "min": 800.0,
378
+ "max": 3400.0
379
+ },
380
+ {
381
+ "current": 3400.0,
382
+ "min": 800.0,
383
+ "max": 3400.0
384
+ },
385
+ {
386
+ "current": 3400.0,
387
+ "min": 800.0,
388
+ "max": 3400.0
389
+ },
390
+ {
391
+ "current": 3400.0,
392
+ "min": 800.0,
393
+ "max": 3400.0
394
+ },
395
+ {
396
+ "current": 3400.0,
397
+ "min": 800.0,
398
+ "max": 3400.0
399
+ },
400
+ {
401
+ "current": 3400.0,
402
+ "min": 800.0,
403
+ "max": 3400.0
404
+ },
405
+ {
406
+ "current": 3400.0,
407
+ "min": 800.0,
408
+ "max": 3400.0
409
+ },
410
+ {
411
+ "current": 3400.0,
412
+ "min": 800.0,
413
+ "max": 3400.0
414
+ },
415
+ {
416
+ "current": 3400.0,
417
+ "min": 800.0,
418
+ "max": 3400.0
419
+ },
420
+ {
421
+ "current": 3400.0,
422
+ "min": 800.0,
423
+ "max": 3400.0
424
+ },
425
+ {
426
+ "current": 3400.0,
427
+ "min": 800.0,
428
+ "max": 3400.0
429
+ },
430
+ {
431
+ "current": 3400.0,
432
+ "min": 800.0,
433
+ "max": 3400.0
434
+ },
435
+ {
436
+ "current": 3400.0,
437
+ "min": 800.0,
438
+ "max": 3400.0
439
+ },
440
+ {
441
+ "current": 3400.0,
442
+ "min": 800.0,
443
+ "max": 3400.0
444
+ },
445
+ {
446
+ "current": 3400.0,
447
+ "min": 800.0,
448
+ "max": 3400.0
449
+ },
450
+ {
451
+ "current": 3400.0,
452
+ "min": 800.0,
453
+ "max": 3400.0
454
+ },
455
+ {
456
+ "current": 3400.0,
457
+ "min": 800.0,
458
+ "max": 3400.0
459
+ },
460
+ {
461
+ "current": 3400.0,
462
+ "min": 800.0,
463
+ "max": 3400.0
464
+ },
465
+ {
466
+ "current": 3400.0,
467
+ "min": 800.0,
468
+ "max": 3400.0
469
+ },
470
+ {
471
+ "current": 3400.0,
472
+ "min": 800.0,
473
+ "max": 3400.0
474
+ },
475
+ {
476
+ "current": 3400.0,
477
+ "min": 800.0,
478
+ "max": 3400.0
479
+ },
480
+ {
481
+ "current": 3400.0,
482
+ "min": 800.0,
483
+ "max": 3400.0
484
+ },
485
+ {
486
+ "current": 3400.0,
487
+ "min": 800.0,
488
+ "max": 3400.0
489
+ },
490
+ {
491
+ "current": 3400.0,
492
+ "min": 800.0,
493
+ "max": 3400.0
494
+ },
495
+ {
496
+ "current": 3400.0,
497
+ "min": 800.0,
498
+ "max": 3400.0
499
+ },
500
+ {
501
+ "current": 3400.0,
502
+ "min": 800.0,
503
+ "max": 3400.0
504
+ },
505
+ {
506
+ "current": 3400.0,
507
+ "min": 800.0,
508
+ "max": 3400.0
509
+ },
510
+ {
511
+ "current": 3400.0,
512
+ "min": 800.0,
513
+ "max": 3400.0
514
+ },
515
+ {
516
+ "current": 3400.0,
517
+ "min": 800.0,
518
+ "max": 3400.0
519
+ },
520
+ {
521
+ "current": 3400.0,
522
+ "min": 800.0,
523
+ "max": 3400.0
524
+ },
525
+ {
526
+ "current": 3400.0,
527
+ "min": 800.0,
528
+ "max": 3400.0
529
+ },
530
+ {
531
+ "current": 3400.0,
532
+ "min": 800.0,
533
+ "max": 3400.0
534
+ },
535
+ {
536
+ "current": 3400.0,
537
+ "min": 800.0,
538
+ "max": 3400.0
539
+ },
540
+ {
541
+ "current": 3400.0,
542
+ "min": 800.0,
543
+ "max": 3400.0
544
+ },
545
+ {
546
+ "current": 3400.0,
547
+ "min": 800.0,
548
+ "max": 3400.0
549
+ },
550
+ {
551
+ "current": 3400.0,
552
+ "min": 800.0,
553
+ "max": 3400.0
554
+ },
555
+ {
556
+ "current": 3400.0,
557
+ "min": 800.0,
558
+ "max": 3400.0
559
+ },
560
+ {
561
+ "current": 3400.0,
562
+ "min": 800.0,
563
+ "max": 3400.0
564
+ },
565
+ {
566
+ "current": 3400.0,
567
+ "min": 800.0,
568
+ "max": 3400.0
569
+ },
570
+ {
571
+ "current": 3400.0,
572
+ "min": 800.0,
573
+ "max": 3400.0
574
+ },
575
+ {
576
+ "current": 3400.0,
577
+ "min": 800.0,
578
+ "max": 3400.0
579
+ },
580
+ {
581
+ "current": 3400.0,
582
+ "min": 800.0,
583
+ "max": 3400.0
584
+ },
585
+ {
586
+ "current": 3400.0,
587
+ "min": 800.0,
588
+ "max": 3400.0
589
+ },
590
+ {
591
+ "current": 3400.0,
592
+ "min": 800.0,
593
+ "max": 3400.0
594
+ },
595
+ {
596
+ "current": 3400.0,
597
+ "min": 800.0,
598
+ "max": 3400.0
599
+ },
600
+ {
601
+ "current": 3400.0,
602
+ "min": 800.0,
603
+ "max": 3400.0
604
+ },
605
+ {
606
+ "current": 3400.0,
607
+ "min": 800.0,
608
+ "max": 3400.0
609
+ },
610
+ {
611
+ "current": 3400.0,
612
+ "min": 800.0,
613
+ "max": 3400.0
614
+ },
615
+ {
616
+ "current": 3400.0,
617
+ "min": 800.0,
618
+ "max": 3400.0
619
+ },
620
+ {
621
+ "current": 3400.0,
622
+ "min": 800.0,
623
+ "max": 3400.0
624
+ },
625
+ {
626
+ "current": 3400.0,
627
+ "min": 800.0,
628
+ "max": 3400.0
629
+ },
630
+ {
631
+ "current": 3400.0,
632
+ "min": 800.0,
633
+ "max": 3400.0
634
+ },
635
+ {
636
+ "current": 3400.0,
637
+ "min": 800.0,
638
+ "max": 3400.0
639
+ },
640
+ {
641
+ "current": 3400.0,
642
+ "min": 800.0,
643
+ "max": 3400.0
644
+ },
645
+ {
646
+ "current": 3400.0,
647
+ "min": 800.0,
648
+ "max": 3400.0
649
+ },
650
+ {
651
+ "current": 3400.0,
652
+ "min": 800.0,
653
+ "max": 3400.0
654
+ },
655
+ {
656
+ "current": 3400.0,
657
+ "min": 800.0,
658
+ "max": 3400.0
659
+ },
660
+ {
661
+ "current": 3400.0,
662
+ "min": 800.0,
663
+ "max": 3400.0
664
+ },
665
+ {
666
+ "current": 3400.0,
667
+ "min": 800.0,
668
+ "max": 3400.0
669
+ },
670
+ {
671
+ "current": 3400.0,
672
+ "min": 800.0,
673
+ "max": 3400.0
674
+ },
675
+ {
676
+ "current": 3355.491,
677
+ "min": 800.0,
678
+ "max": 3400.0
679
+ },
680
+ {
681
+ "current": 3400.0,
682
+ "min": 800.0,
683
+ "max": 3400.0
684
+ },
685
+ {
686
+ "current": 3400.0,
687
+ "min": 800.0,
688
+ "max": 3400.0
689
+ },
690
+ {
691
+ "current": 3400.0,
692
+ "min": 800.0,
693
+ "max": 3400.0
694
+ },
695
+ {
696
+ "current": 3400.0,
697
+ "min": 800.0,
698
+ "max": 3400.0
699
+ },
700
+ {
701
+ "current": 3400.0,
702
+ "min": 800.0,
703
+ "max": 3400.0
704
+ },
705
+ {
706
+ "current": 3400.0,
707
+ "min": 800.0,
708
+ "max": 3400.0
709
+ },
710
+ {
711
+ "current": 3400.0,
712
+ "min": 800.0,
713
+ "max": 3400.0
714
+ },
715
+ {
716
+ "current": 3400.0,
717
+ "min": 800.0,
718
+ "max": 3400.0
719
+ },
720
+ {
721
+ "current": 3400.0,
722
+ "min": 800.0,
723
+ "max": 3400.0
724
+ },
725
+ {
726
+ "current": 3400.0,
727
+ "min": 800.0,
728
+ "max": 3400.0
729
+ },
730
+ {
731
+ "current": 3400.0,
732
+ "min": 800.0,
733
+ "max": 3400.0
734
+ },
735
+ {
736
+ "current": 3400.0,
737
+ "min": 800.0,
738
+ "max": 3400.0
739
+ },
740
+ {
741
+ "current": 3400.0,
742
+ "min": 800.0,
743
+ "max": 3400.0
744
+ },
745
+ {
746
+ "current": 3400.0,
747
+ "min": 800.0,
748
+ "max": 3400.0
749
+ },
750
+ {
751
+ "current": 3400.0,
752
+ "min": 800.0,
753
+ "max": 3400.0
754
+ },
755
+ {
756
+ "current": 3400.0,
757
+ "min": 800.0,
758
+ "max": 3400.0
759
+ },
760
+ {
761
+ "current": 3400.0,
762
+ "min": 800.0,
763
+ "max": 3400.0
764
+ },
765
+ {
766
+ "current": 3400.0,
767
+ "min": 800.0,
768
+ "max": 3400.0
769
+ },
770
+ {
771
+ "current": 3400.0,
772
+ "min": 800.0,
773
+ "max": 3400.0
774
+ },
775
+ {
776
+ "current": 3400.0,
777
+ "min": 800.0,
778
+ "max": 3400.0
779
+ },
780
+ {
781
+ "current": 3400.0,
782
+ "min": 800.0,
783
+ "max": 3400.0
784
+ },
785
+ {
786
+ "current": 3400.0,
787
+ "min": 800.0,
788
+ "max": 3400.0
789
+ },
790
+ {
791
+ "current": 3400.0,
792
+ "min": 800.0,
793
+ "max": 3400.0
794
+ },
795
+ {
796
+ "current": 3400.0,
797
+ "min": 800.0,
798
+ "max": 3400.0
799
+ }
800
+ ],
801
+ "disk": {
802
+ "/": {
803
+ "total": 866.4415092468262,
804
+ "used": 863.4841499328613
805
+ }
806
+ },
807
+ "memory": {
808
+ "total": 1007.5000267028809
809
+ }
810
+ }
lm-evaluation-harness/wandb/run-20240514_163424-gpqnyvgo/files/wandb-summary.json ADDED
@@ -0,0 +1 @@
+ {"_wandb": {"runtime": 27}}
lm-evaluation-harness/wandb/run-20240514_163424-gpqnyvgo/logs/debug-internal.log ADDED
@@ -0,0 +1,194 @@
1
+ 2024-05-14 16:34:24,515 INFO StreamThr :117718 [internal.py:wandb_internal():85] W&B internal server running at pid: 117718, started at: 2024-05-14 16:34:24.515144
2
+ 2024-05-14 16:34:24,518 DEBUG HandlerThread:117718 [handler.py:handle_request():158] handle_request: status
3
+ 2024-05-14 16:34:24,519 INFO WriterThread:117718 [datastore.py:open_for_write():87] open: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163424-gpqnyvgo/run-gpqnyvgo.wandb
4
+ 2024-05-14 16:34:24,520 DEBUG SenderThread:117718 [sender.py:send():378] send: header
5
+ 2024-05-14 16:34:24,530 DEBUG SenderThread:117718 [sender.py:send():378] send: run
6
+ 2024-05-14 16:34:24,797 INFO SenderThread:117718 [dir_watcher.py:__init__():211] watching files in: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163424-gpqnyvgo/files
7
+ 2024-05-14 16:34:24,797 INFO SenderThread:117718 [sender.py:_start_run_threads():1123] run started: gpqnyvgo with start time 1715704464.514761
8
+ 2024-05-14 16:34:24,804 DEBUG HandlerThread:117718 [handler.py:handle_request():158] handle_request: check_version
9
+ 2024-05-14 16:34:24,804 DEBUG SenderThread:117718 [sender.py:send_request():405] send_request: check_version
10
+ 2024-05-14 16:34:24,888 DEBUG HandlerThread:117718 [handler.py:handle_request():158] handle_request: run_start
11
+ 2024-05-14 16:34:24,890 DEBUG HandlerThread:117718 [system_info.py:__init__():26] System info init
12
+ 2024-05-14 16:34:24,890 DEBUG HandlerThread:117718 [system_info.py:__init__():41] System info init done
13
+ 2024-05-14 16:34:24,890 INFO HandlerThread:117718 [system_monitor.py:start():194] Starting system monitor
14
+ 2024-05-14 16:34:24,890 INFO SystemMonitor:117718 [system_monitor.py:_start():158] Starting system asset monitoring threads
15
+ 2024-05-14 16:34:24,890 INFO HandlerThread:117718 [system_monitor.py:probe():214] Collecting system info
16
+ 2024-05-14 16:34:24,890 INFO SystemMonitor:117718 [interfaces.py:start():188] Started cpu monitoring
17
+ 2024-05-14 16:34:24,893 INFO SystemMonitor:117718 [interfaces.py:start():188] Started disk monitoring
18
+ 2024-05-14 16:34:24,893 INFO SystemMonitor:117718 [interfaces.py:start():188] Started memory monitoring
19
+ 2024-05-14 16:34:24,893 INFO SystemMonitor:117718 [interfaces.py:start():188] Started network monitoring
20
+ 2024-05-14 16:34:24,956 DEBUG HandlerThread:117718 [system_info.py:probe():150] Probing system
21
+ 2024-05-14 16:34:24,964 DEBUG HandlerThread:117718 [system_info.py:_probe_git():135] Probing git
22
+ 2024-05-14 16:34:24,983 ERROR HandlerThread:117718 [gitlib.py:root():92] git root error: Cmd('git') failed due to: exit code(128)
23
+ cmdline: git rev-parse --show-toplevel
24
+ stderr: 'fatal: detected dubious ownership in repository at '/data/cronscript/lm-evaluation-harness'
25
+ To add an exception for this directory, call:
26
+
27
+ git config --global --add safe.directory /data/cronscript/lm-evaluation-harness'
28
+ 2024-05-14 16:34:24,984 DEBUG HandlerThread:117718 [system_info.py:_probe_git():143] Probing git done
29
+ 2024-05-14 16:34:24,984 DEBUG HandlerThread:117718 [system_info.py:probe():198] Probing system done
30
+ 2024-05-14 16:34:24,984 DEBUG HandlerThread:117718 [system_monitor.py:probe():223] {'os': 'Linux-5.15.0-92-generic-x86_64-with-glibc2.35', 'python': '3.10.12', 'heartbeatAt': '2024-05-14T16:34:24.956403', 'startedAt': '2024-05-14T16:34:24.504649', 'docker': None, 'cuda': None, 'args': ('--model', 'hf', '--model_args', 'pretrained=/data/cronscript/ckpts//hf_ckpt//global_step100', '--tasks', 'indiccopa-hi', '--batch_size', 'auto', '--wandb_args', 'project=bharatgpt,group=trial_expt'), 'state': 'running', 'program': '-m lm_eval.__main__', 'codePathLocal': None, 'git': {'remote': 'https://github.com/EleutherAI/lm-evaluation-harness', 'commit': None}, 'email': None, 'root': '/data/cronscript/lm-evaluation-harness', 'host': 'vizzhy-150-3', 'username': 'root', 'executable': '/usr/bin/python3', 'cpu_count': 76, 'cpu_count_logical': 152, 'cpu_freq': {'current': 3394.3326644736844, 'min': 800.0, 'max': 3400.0}, 'cpu_freq_per_core': [{'current': 3208.609, 'min': 800.0, 'max': 3400.0}, {'current': 3208.512, 'min': 800.0, 'max': 3400.0}, {'current': 3208.417, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3345.501, 'min': 800.0, 'max': 3400.0}, {'current': 3345.731, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3375.983, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, 
{'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3304.616, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, 
{'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3355.491, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}], 'disk': {'/': {'total': 866.4415092468262, 'used': 863.4841499328613}}, 'memory': {'total': 1007.5000267028809}}
31
+ 2024-05-14 16:34:24,984 INFO HandlerThread:117718 [system_monitor.py:probe():224] Finished collecting system info
32
+ 2024-05-14 16:34:24,984 INFO HandlerThread:117718 [system_monitor.py:probe():227] Publishing system info
33
+ 2024-05-14 16:34:24,985 INFO HandlerThread:117718 [system_monitor.py:probe():229] Finished publishing system info
34
+ 2024-05-14 16:34:24,989 DEBUG SenderThread:117718 [sender.py:send():378] send: files
35
+ 2024-05-14 16:34:24,989 INFO SenderThread:117718 [sender.py:_save_file():1389] saving file wandb-metadata.json with policy now
36
+ 2024-05-14 16:34:25,088 DEBUG HandlerThread:117718 [handler.py:handle_request():158] handle_request: python_packages
37
+ 2024-05-14 16:34:25,089 DEBUG SenderThread:117718 [sender.py:send_request():405] send_request: python_packages
38
+ 2024-05-14 16:34:25,089 DEBUG HandlerThread:117718 [handler.py:handle_request():158] handle_request: stop_status
39
+ 2024-05-14 16:34:25,089 DEBUG SenderThread:117718 [sender.py:send_request():405] send_request: stop_status
40
+ 2024-05-14 16:34:25,247 DEBUG SenderThread:117718 [sender.py:send():378] send: telemetry
41
+ 2024-05-14 16:34:25,511 INFO wandb-upload_0:117718 [upload_job.py:push():130] Uploaded file /tmp/tmp95oaxk4bwandb/81dqsdmz-wandb-metadata.json
42
+ 2024-05-14 16:34:25,798 INFO Thread-12 :117718 [dir_watcher.py:_on_file_created():271] file/dir created: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163424-gpqnyvgo/files/requirements.txt
43
+ 2024-05-14 16:34:25,798 INFO Thread-12 :117718 [dir_watcher.py:_on_file_created():271] file/dir created: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163424-gpqnyvgo/files/wandb-metadata.json
44
+ 2024-05-14 16:34:25,799 INFO Thread-12 :117718 [dir_watcher.py:_on_file_created():271] file/dir created: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163424-gpqnyvgo/files/output.log
45
+ 2024-05-14 16:34:27,798 INFO Thread-12 :117718 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163424-gpqnyvgo/files/output.log
46
+ 2024-05-14 16:34:30,249 DEBUG HandlerThread:117718 [handler.py:handle_request():158] handle_request: status_report
47
+ 2024-05-14 16:34:31,800 INFO Thread-12 :117718 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163424-gpqnyvgo/files/output.log
48
+ 2024-05-14 16:34:35,623 DEBUG HandlerThread:117718 [handler.py:handle_request():158] handle_request: status_report
49
+ 2024-05-14 16:34:39,806 INFO Thread-12 :117718 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163424-gpqnyvgo/files/output.log
50
+ 2024-05-14 16:34:40,090 DEBUG HandlerThread:117718 [handler.py:handle_request():158] handle_request: stop_status
51
+ 2024-05-14 16:34:40,091 DEBUG SenderThread:117718 [sender.py:send_request():405] send_request: stop_status
52
+ 2024-05-14 16:34:41,214 DEBUG HandlerThread:117718 [handler.py:handle_request():158] handle_request: status_report
53
+ 2024-05-14 16:34:43,809 INFO Thread-12 :117718 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163424-gpqnyvgo/files/output.log
54
+ 2024-05-14 16:34:45,810 INFO Thread-12 :117718 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163424-gpqnyvgo/files/output.log
55
+ 2024-05-14 16:34:46,282 DEBUG HandlerThread:117718 [handler.py:handle_request():158] handle_request: status_report
56
+ 2024-05-14 16:34:47,812 INFO Thread-12 :117718 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163424-gpqnyvgo/files/output.log
57
+ 2024-05-14 16:34:51,282 DEBUG HandlerThread:117718 [handler.py:handle_request():158] handle_request: status_report
58
+ 2024-05-14 16:34:52,295 DEBUG SenderThread:117718 [sender.py:send():378] send: exit
59
+ 2024-05-14 16:34:52,296 INFO SenderThread:117718 [sender.py:send_exit():585] handling exit code: 0
60
+ 2024-05-14 16:34:52,296 INFO SenderThread:117718 [sender.py:send_exit():587] handling runtime: 27
61
+ 2024-05-14 16:34:52,297 INFO SenderThread:117718 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end
62
+ 2024-05-14 16:34:52,297 INFO SenderThread:117718 [sender.py:send_exit():593] send defer
63
+ 2024-05-14 16:34:52,297 DEBUG HandlerThread:117718 [handler.py:handle_request():158] handle_request: defer
64
+ 2024-05-14 16:34:52,297 INFO HandlerThread:117718 [handler.py:handle_request_defer():184] handle defer: 0
65
+ 2024-05-14 16:34:52,297 DEBUG SenderThread:117718 [sender.py:send_request():405] send_request: defer
66
+ 2024-05-14 16:34:52,297 INFO SenderThread:117718 [sender.py:send_request_defer():609] handle sender defer: 0
67
+ 2024-05-14 16:34:52,297 INFO SenderThread:117718 [sender.py:transition_state():613] send defer: 1
68
+ 2024-05-14 16:34:52,297 DEBUG HandlerThread:117718 [handler.py:handle_request():158] handle_request: defer
69
+ 2024-05-14 16:34:52,297 INFO HandlerThread:117718 [handler.py:handle_request_defer():184] handle defer: 1
70
+ 2024-05-14 16:34:52,298 DEBUG SenderThread:117718 [sender.py:send_request():405] send_request: defer
71
+ 2024-05-14 16:34:52,298 INFO SenderThread:117718 [sender.py:send_request_defer():609] handle sender defer: 1
72
+ 2024-05-14 16:34:52,298 INFO SenderThread:117718 [sender.py:transition_state():613] send defer: 2
73
+ 2024-05-14 16:34:52,298 DEBUG HandlerThread:117718 [handler.py:handle_request():158] handle_request: defer
74
+ 2024-05-14 16:34:52,298 INFO HandlerThread:117718 [handler.py:handle_request_defer():184] handle defer: 2
75
+ 2024-05-14 16:34:52,298 INFO HandlerThread:117718 [system_monitor.py:finish():203] Stopping system monitor
76
+ 2024-05-14 16:34:52,298 DEBUG SystemMonitor:117718 [system_monitor.py:_start():172] Starting system metrics aggregation loop
77
+ 2024-05-14 16:34:52,299 INFO HandlerThread:117718 [interfaces.py:finish():200] Joined cpu monitor
78
+ 2024-05-14 16:34:52,299 DEBUG SystemMonitor:117718 [system_monitor.py:_start():179] Finished system metrics aggregation loop
79
+ 2024-05-14 16:34:52,299 INFO HandlerThread:117718 [interfaces.py:finish():200] Joined disk monitor
80
+ 2024-05-14 16:34:52,299 DEBUG SystemMonitor:117718 [system_monitor.py:_start():183] Publishing last batch of metrics
81
+ 2024-05-14 16:34:52,299 INFO HandlerThread:117718 [interfaces.py:finish():200] Joined memory monitor
82
+ 2024-05-14 16:34:52,302 INFO HandlerThread:117718 [interfaces.py:finish():200] Joined network monitor
83
+ 2024-05-14 16:34:52,302 DEBUG SenderThread:117718 [sender.py:send_request():405] send_request: defer
84
+ 2024-05-14 16:34:52,302 INFO SenderThread:117718 [sender.py:send_request_defer():609] handle sender defer: 2
85
+ 2024-05-14 16:34:52,302 INFO SenderThread:117718 [sender.py:transition_state():613] send defer: 3
86
+ 2024-05-14 16:34:52,303 DEBUG HandlerThread:117718 [handler.py:handle_request():158] handle_request: defer
87
+ 2024-05-14 16:34:52,303 DEBUG SenderThread:117718 [sender.py:send():378] send: stats
88
+ 2024-05-14 16:34:52,303 INFO HandlerThread:117718 [handler.py:handle_request_defer():184] handle defer: 3
89
+ 2024-05-14 16:34:52,304 DEBUG SenderThread:117718 [sender.py:send_request():405] send_request: defer
90
+ 2024-05-14 16:34:52,304 INFO SenderThread:117718 [sender.py:send_request_defer():609] handle sender defer: 3
91
+ 2024-05-14 16:34:52,304 INFO SenderThread:117718 [sender.py:transition_state():613] send defer: 4
92
+ 2024-05-14 16:34:52,304 DEBUG HandlerThread:117718 [handler.py:handle_request():158] handle_request: defer
93
+ 2024-05-14 16:34:52,304 INFO HandlerThread:117718 [handler.py:handle_request_defer():184] handle defer: 4
94
+ 2024-05-14 16:34:52,304 DEBUG SenderThread:117718 [sender.py:send_request():405] send_request: defer
95
+ 2024-05-14 16:34:52,304 INFO SenderThread:117718 [sender.py:send_request_defer():609] handle sender defer: 4
96
+ 2024-05-14 16:34:52,304 INFO SenderThread:117718 [sender.py:transition_state():613] send defer: 5
97
+ 2024-05-14 16:34:52,304 DEBUG HandlerThread:117718 [handler.py:handle_request():158] handle_request: defer
98
+ 2024-05-14 16:34:52,304 INFO HandlerThread:117718 [handler.py:handle_request_defer():184] handle defer: 5
99
+ 2024-05-14 16:34:52,304 DEBUG SenderThread:117718 [sender.py:send():378] send: summary
100
+ 2024-05-14 16:34:52,305 INFO SenderThread:117718 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end
101
+ 2024-05-14 16:34:52,305 DEBUG SenderThread:117718 [sender.py:send_request():405] send_request: defer
102
+ 2024-05-14 16:34:52,305 INFO SenderThread:117718 [sender.py:send_request_defer():609] handle sender defer: 5
103
+ 2024-05-14 16:34:52,305 INFO SenderThread:117718 [sender.py:transition_state():613] send defer: 6
104
+ 2024-05-14 16:34:52,306 DEBUG HandlerThread:117718 [handler.py:handle_request():158] handle_request: defer
105
+ 2024-05-14 16:34:52,306 INFO HandlerThread:117718 [handler.py:handle_request_defer():184] handle defer: 6
106
+ 2024-05-14 16:34:52,306 DEBUG SenderThread:117718 [sender.py:send_request():405] send_request: defer
107
+ 2024-05-14 16:34:52,306 INFO SenderThread:117718 [sender.py:send_request_defer():609] handle sender defer: 6
108
+ 2024-05-14 16:34:52,308 DEBUG HandlerThread:117718 [handler.py:handle_request():158] handle_request: status_report
109
+ 2024-05-14 16:34:52,391 INFO SenderThread:117718 [sender.py:transition_state():613] send defer: 7
110
+ 2024-05-14 16:34:52,391 DEBUG HandlerThread:117718 [handler.py:handle_request():158] handle_request: defer
111
+ 2024-05-14 16:34:52,392 INFO HandlerThread:117718 [handler.py:handle_request_defer():184] handle defer: 7
112
+ 2024-05-14 16:34:52,392 DEBUG SenderThread:117718 [sender.py:send_request():405] send_request: defer
113
+ 2024-05-14 16:34:52,392 INFO SenderThread:117718 [sender.py:send_request_defer():609] handle sender defer: 7
114
+ 2024-05-14 16:34:52,815 INFO Thread-12 :117718 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163424-gpqnyvgo/files/config.yaml
115
+ 2024-05-14 16:34:52,815 INFO Thread-12 :117718 [dir_watcher.py:_on_file_created():271] file/dir created: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163424-gpqnyvgo/files/wandb-summary.json
116
+ 2024-05-14 16:34:53,296 DEBUG HandlerThread:117718 [handler.py:handle_request():158] handle_request: poll_exit
117
+ 2024-05-14 16:34:55,235 INFO SenderThread:117718 [sender.py:transition_state():613] send defer: 8
118
+ 2024-05-14 16:34:55,235 DEBUG SenderThread:117718 [sender.py:send_request():405] send_request: poll_exit
119
+ 2024-05-14 16:34:55,235 DEBUG HandlerThread:117718 [handler.py:handle_request():158] handle_request: defer
120
+ 2024-05-14 16:34:55,235 INFO HandlerThread:117718 [handler.py:handle_request_defer():184] handle defer: 8
121
+ 2024-05-14 16:34:55,236 DEBUG SenderThread:117718 [sender.py:send_request():405] send_request: defer
122
+ 2024-05-14 16:34:55,236 INFO SenderThread:117718 [sender.py:send_request_defer():609] handle sender defer: 8
123
+ 2024-05-14 16:34:55,236 INFO SenderThread:117718 [job_builder.py:build():432] Attempting to build job artifact
124
+ 2024-05-14 16:34:55,236 INFO SenderThread:117718 [job_builder.py:_get_source_type():576] no source found
125
+ 2024-05-14 16:34:55,236 INFO SenderThread:117718 [sender.py:transition_state():613] send defer: 9
126
+ 2024-05-14 16:34:55,236 DEBUG HandlerThread:117718 [handler.py:handle_request():158] handle_request: defer
127
+ 2024-05-14 16:34:55,236 INFO HandlerThread:117718 [handler.py:handle_request_defer():184] handle defer: 9
128
+ 2024-05-14 16:34:55,237 DEBUG SenderThread:117718 [sender.py:send_request():405] send_request: defer
129
+ 2024-05-14 16:34:55,237 INFO SenderThread:117718 [sender.py:send_request_defer():609] handle sender defer: 9
130
+ 2024-05-14 16:34:55,237 INFO SenderThread:117718 [dir_watcher.py:finish():358] shutting down directory watcher
131
+ 2024-05-14 16:34:55,296 DEBUG HandlerThread:117718 [handler.py:handle_request():158] handle_request: poll_exit
132
+ 2024-05-14 16:34:55,816 INFO SenderThread:117718 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163424-gpqnyvgo/files/output.log
133
+ 2024-05-14 16:34:55,816 INFO SenderThread:117718 [dir_watcher.py:finish():388] scan: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163424-gpqnyvgo/files
134
+ 2024-05-14 16:34:55,816 INFO SenderThread:117718 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163424-gpqnyvgo/files/config.yaml config.yaml
135
+ 2024-05-14 16:34:55,816 INFO SenderThread:117718 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163424-gpqnyvgo/files/requirements.txt requirements.txt
136
+ 2024-05-14 16:34:55,818 INFO SenderThread:117718 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163424-gpqnyvgo/files/output.log output.log
137
+ 2024-05-14 16:34:55,818 INFO SenderThread:117718 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163424-gpqnyvgo/files/wandb-metadata.json wandb-metadata.json
138
+ 2024-05-14 16:34:55,818 INFO SenderThread:117718 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163424-gpqnyvgo/files/wandb-summary.json wandb-summary.json
139
+ 2024-05-14 16:34:55,820 INFO SenderThread:117718 [sender.py:transition_state():613] send defer: 10
140
+ 2024-05-14 16:34:55,820 DEBUG SenderThread:117718 [sender.py:send_request():405] send_request: poll_exit
141
+ 2024-05-14 16:34:55,821 DEBUG HandlerThread:117718 [handler.py:handle_request():158] handle_request: defer
142
+ 2024-05-14 16:34:55,822 INFO HandlerThread:117718 [handler.py:handle_request_defer():184] handle defer: 10
143
+ 2024-05-14 16:34:55,822 DEBUG SenderThread:117718 [sender.py:send_request():405] send_request: defer
144
+ 2024-05-14 16:34:55,822 INFO SenderThread:117718 [sender.py:send_request_defer():609] handle sender defer: 10
145
+ 2024-05-14 16:34:55,822 INFO SenderThread:117718 [file_pusher.py:finish():169] shutting down file pusher
146
+ 2024-05-14 16:34:56,051 INFO wandb-upload_0:117718 [upload_job.py:push():130] Uploaded file /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163424-gpqnyvgo/files/config.yaml
147
+ 2024-05-14 16:34:56,219 INFO wandb-upload_1:117718 [upload_job.py:push():130] Uploaded file /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163424-gpqnyvgo/files/requirements.txt
148
+ 2024-05-14 16:34:56,297 DEBUG HandlerThread:117718 [handler.py:handle_request():158] handle_request: poll_exit
149
+ 2024-05-14 16:34:56,297 DEBUG SenderThread:117718 [sender.py:send_request():405] send_request: poll_exit
150
+ 2024-05-14 16:34:56,313 INFO wandb-upload_2:117718 [upload_job.py:push():130] Uploaded file /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163424-gpqnyvgo/files/output.log
151
+ 2024-05-14 16:34:56,319 INFO wandb-upload_3:117718 [upload_job.py:push():130] Uploaded file /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163424-gpqnyvgo/files/wandb-summary.json
152
+ 2024-05-14 16:34:56,520 INFO Thread-11 (_thread_body):117718 [sender.py:transition_state():613] send defer: 11
153
+ 2024-05-14 16:34:56,520 DEBUG HandlerThread:117718 [handler.py:handle_request():158] handle_request: defer
154
+ 2024-05-14 16:34:56,520 INFO HandlerThread:117718 [handler.py:handle_request_defer():184] handle defer: 11
155
+ 2024-05-14 16:34:56,520 DEBUG SenderThread:117718 [sender.py:send_request():405] send_request: defer
156
+ 2024-05-14 16:34:56,520 INFO SenderThread:117718 [sender.py:send_request_defer():609] handle sender defer: 11
157
+ 2024-05-14 16:34:56,520 INFO SenderThread:117718 [file_pusher.py:join():175] waiting for file pusher
158
+ 2024-05-14 16:34:56,521 INFO SenderThread:117718 [sender.py:transition_state():613] send defer: 12
159
+ 2024-05-14 16:34:56,521 DEBUG HandlerThread:117718 [handler.py:handle_request():158] handle_request: defer
160
+ 2024-05-14 16:34:56,521 INFO HandlerThread:117718 [handler.py:handle_request_defer():184] handle defer: 12
161
+ 2024-05-14 16:34:56,521 DEBUG SenderThread:117718 [sender.py:send_request():405] send_request: defer
162
+ 2024-05-14 16:34:56,521 INFO SenderThread:117718 [sender.py:send_request_defer():609] handle sender defer: 12
163
+ 2024-05-14 16:34:56,521 INFO SenderThread:117718 [file_stream.py:finish():601] file stream finish called
164
+ 2024-05-14 16:34:56,584 INFO SenderThread:117718 [file_stream.py:finish():605] file stream finish is done
165
+ 2024-05-14 16:34:56,584 INFO SenderThread:117718 [sender.py:transition_state():613] send defer: 13
166
+ 2024-05-14 16:34:56,584 DEBUG HandlerThread:117718 [handler.py:handle_request():158] handle_request: defer
167
+ 2024-05-14 16:34:56,585 INFO HandlerThread:117718 [handler.py:handle_request_defer():184] handle defer: 13
168
+ 2024-05-14 16:34:56,585 DEBUG SenderThread:117718 [sender.py:send_request():405] send_request: defer
169
+ 2024-05-14 16:34:56,585 INFO SenderThread:117718 [sender.py:send_request_defer():609] handle sender defer: 13
170
+ 2024-05-14 16:34:56,585 INFO SenderThread:117718 [sender.py:transition_state():613] send defer: 14
171
+ 2024-05-14 16:34:56,585 DEBUG HandlerThread:117718 [handler.py:handle_request():158] handle_request: defer
172
+ 2024-05-14 16:34:56,585 DEBUG SenderThread:117718 [sender.py:send():378] send: final
173
+ 2024-05-14 16:34:56,585 INFO HandlerThread:117718 [handler.py:handle_request_defer():184] handle defer: 14
174
+ 2024-05-14 16:34:56,585 DEBUG SenderThread:117718 [sender.py:send():378] send: footer
175
+ 2024-05-14 16:34:56,585 DEBUG SenderThread:117718 [sender.py:send_request():405] send_request: defer
176
+ 2024-05-14 16:34:56,585 INFO SenderThread:117718 [sender.py:send_request_defer():609] handle sender defer: 14
177
+ 2024-05-14 16:34:56,586 DEBUG HandlerThread:117718 [handler.py:handle_request():158] handle_request: poll_exit
178
+ 2024-05-14 16:34:56,586 DEBUG SenderThread:117718 [sender.py:send_request():405] send_request: poll_exit
179
+ 2024-05-14 16:34:56,586 DEBUG HandlerThread:117718 [handler.py:handle_request():158] handle_request: poll_exit
180
+ 2024-05-14 16:34:56,586 DEBUG SenderThread:117718 [sender.py:send_request():405] send_request: poll_exit
181
+ 2024-05-14 16:34:56,586 DEBUG HandlerThread:117718 [handler.py:handle_request():158] handle_request: server_info
182
+ 2024-05-14 16:34:56,586 DEBUG SenderThread:117718 [sender.py:send_request():405] send_request: server_info
183
+ 2024-05-14 16:34:56,588 DEBUG HandlerThread:117718 [handler.py:handle_request():158] handle_request: get_summary
184
+ 2024-05-14 16:34:56,588 DEBUG HandlerThread:117718 [handler.py:handle_request():158] handle_request: sampled_history
185
+ 2024-05-14 16:34:56,588 DEBUG HandlerThread:117718 [handler.py:handle_request():158] handle_request: internal_messages
186
+ 2024-05-14 16:34:56,648 INFO MainThread:117718 [wandb_run.py:_footer_history_summary_info():3994] rendering history
187
+ 2024-05-14 16:34:56,648 INFO MainThread:117718 [wandb_run.py:_footer_history_summary_info():4026] rendering summary
188
+ 2024-05-14 16:34:56,648 INFO MainThread:117718 [wandb_run.py:_footer_sync_info():3953] logging synced files
189
+ 2024-05-14 16:34:56,648 DEBUG HandlerThread:117718 [handler.py:handle_request():158] handle_request: shutdown
190
+ 2024-05-14 16:34:56,648 INFO HandlerThread:117718 [handler.py:finish():882] shutting down handler
191
+ 2024-05-14 16:34:57,586 INFO WriterThread:117718 [datastore.py:close():296] close: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163424-gpqnyvgo/run-gpqnyvgo.wandb
192
+ 2024-05-14 16:34:57,648 INFO SenderThread:117718 [sender.py:finish():1545] shutting down sender
193
+ 2024-05-14 16:34:57,648 INFO SenderThread:117718 [file_pusher.py:finish():169] shutting down file pusher
194
+ 2024-05-14 16:34:57,648 INFO SenderThread:117718 [file_pusher.py:join():175] waiting for file pusher
lm-evaluation-harness/wandb/run-20240514_163424-gpqnyvgo/run-gpqnyvgo.wandb ADDED
Binary file (11.6 kB).
 
lm-evaluation-harness/wandb/run-20240514_163606-als7uk9d/logs/debug-internal.log ADDED
@@ -0,0 +1,180 @@
1
+ 2024-05-14 16:36:06,744 INFO StreamThr :125099 [internal.py:wandb_internal():85] W&B internal server running at pid: 125099, started at: 2024-05-14 16:36:06.744165
2
+ 2024-05-14 16:36:06,746 DEBUG HandlerThread:125099 [handler.py:handle_request():158] handle_request: status
3
+ 2024-05-14 16:36:06,748 INFO WriterThread:125099 [datastore.py:open_for_write():87] open: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163606-als7uk9d/run-als7uk9d.wandb
4
+ 2024-05-14 16:36:06,749 DEBUG SenderThread:125099 [sender.py:send():378] send: header
5
+ 2024-05-14 16:36:06,757 DEBUG SenderThread:125099 [sender.py:send():378] send: run
6
+ 2024-05-14 16:36:07,023 INFO SenderThread:125099 [dir_watcher.py:__init__():211] watching files in: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163606-als7uk9d/files
7
+ 2024-05-14 16:36:07,024 INFO SenderThread:125099 [sender.py:_start_run_threads():1123] run started: als7uk9d with start time 1715704566.743811
8
+ 2024-05-14 16:36:07,038 DEBUG HandlerThread:125099 [handler.py:handle_request():158] handle_request: check_version
9
+ 2024-05-14 16:36:07,038 DEBUG SenderThread:125099 [sender.py:send_request():405] send_request: check_version
10
+ 2024-05-14 16:36:07,121 DEBUG HandlerThread:125099 [handler.py:handle_request():158] handle_request: run_start
11
+ 2024-05-14 16:36:07,123 DEBUG HandlerThread:125099 [system_info.py:__init__():26] System info init
12
+ 2024-05-14 16:36:07,123 DEBUG HandlerThread:125099 [system_info.py:__init__():41] System info init done
13
+ 2024-05-14 16:36:07,123 INFO HandlerThread:125099 [system_monitor.py:start():194] Starting system monitor
14
+ 2024-05-14 16:36:07,123 INFO SystemMonitor:125099 [system_monitor.py:_start():158] Starting system asset monitoring threads
15
+ 2024-05-14 16:36:07,123 INFO HandlerThread:125099 [system_monitor.py:probe():214] Collecting system info
16
+ 2024-05-14 16:36:07,123 INFO SystemMonitor:125099 [interfaces.py:start():188] Started cpu monitoring
17
+ 2024-05-14 16:36:07,123 INFO SystemMonitor:125099 [interfaces.py:start():188] Started disk monitoring
18
+ 2024-05-14 16:36:07,124 INFO SystemMonitor:125099 [interfaces.py:start():188] Started memory monitoring
19
+ 2024-05-14 16:36:07,125 INFO SystemMonitor:125099 [interfaces.py:start():188] Started network monitoring
20
+ 2024-05-14 16:36:07,214 DEBUG HandlerThread:125099 [system_info.py:probe():150] Probing system
21
+ 2024-05-14 16:36:07,222 DEBUG HandlerThread:125099 [system_info.py:_probe_git():135] Probing git
22
+ 2024-05-14 16:36:07,243 ERROR HandlerThread:125099 [gitlib.py:root():92] git root error: Cmd('git') failed due to: exit code(128)
23
+ cmdline: git rev-parse --show-toplevel
24
+ stderr: 'fatal: detected dubious ownership in repository at '/data/cronscript/lm-evaluation-harness'
25
+ To add an exception for this directory, call:
26
+
27
+ git config --global --add safe.directory /data/cronscript/lm-evaluation-harness'
28
+ 2024-05-14 16:36:07,243 DEBUG HandlerThread:125099 [system_info.py:_probe_git():143] Probing git done
29
+ 2024-05-14 16:36:07,243 DEBUG HandlerThread:125099 [system_info.py:probe():198] Probing system done
30
+ 2024-05-14 16:36:07,243 DEBUG HandlerThread:125099 [system_monitor.py:probe():223] {'os': 'Linux-5.15.0-92-generic-x86_64-with-glibc2.35', 'python': '3.10.12', 'heartbeatAt': '2024-05-14T16:36:07.214234', 'startedAt': '2024-05-14T16:36:06.732839', 'docker': None, 'cuda': None, 'args': ('--model', 'hf', '--model_args', 'pretrained=/data/cronscript/ckpts//hf_ckpt//global_step120', '--tasks', 'indiccopa-hi', '--batch_size', 'auto', '--wandb_args', 'project=bharatgpt,group=trial_expt'), 'state': 'running', 'program': '-m lm_eval.__main__', 'codePathLocal': None, 'git': {'remote': 'https://github.com/EleutherAI/lm-evaluation-harness', 'commit': None}, 'email': None, 'root': '/data/cronscript/lm-evaluation-harness', 'host': 'vizzhy-150-3', 'username': 'root', 'executable': '/usr/bin/python3', 'cpu_count': 76, 'cpu_count_logical': 152, 'cpu_freq': {'current': 3392.0355723684215, 'min': 800.0, 'max': 3400.0}, 'cpu_freq_per_core': [{'current': 3292.895, 'min': 800.0, 'max': 3400.0}, {'current': 3292.903, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3292.929, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3298.22, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, 
{'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3293.82, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, 
{'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}], 'disk': {'/': {'total': 866.4415092468262, 'used': 863.4253959655762}}, 'memory': {'total': 1007.5000267028809}}
31
+ 2024-05-14 16:36:07,243 INFO HandlerThread:125099 [system_monitor.py:probe():224] Finished collecting system info
32
+ 2024-05-14 16:36:07,243 INFO HandlerThread:125099 [system_monitor.py:probe():227] Publishing system info
33
+ 2024-05-14 16:36:07,244 INFO HandlerThread:125099 [system_monitor.py:probe():229] Finished publishing system info
34
+ 2024-05-14 16:36:07,248 DEBUG SenderThread:125099 [sender.py:send():378] send: files
35
+ 2024-05-14 16:36:07,248 INFO SenderThread:125099 [sender.py:_save_file():1389] saving file wandb-metadata.json with policy now
36
+ 2024-05-14 16:36:07,355 DEBUG HandlerThread:125099 [handler.py:handle_request():158] handle_request: python_packages
37
+ 2024-05-14 16:36:07,355 DEBUG SenderThread:125099 [sender.py:send_request():405] send_request: python_packages
38
+ 2024-05-14 16:36:07,356 DEBUG HandlerThread:125099 [handler.py:handle_request():158] handle_request: stop_status
39
+ 2024-05-14 16:36:07,356 DEBUG SenderThread:125099 [sender.py:send_request():405] send_request: stop_status
40
+ 2024-05-14 16:36:07,513 DEBUG SenderThread:125099 [sender.py:send():378] send: telemetry
41
+ 2024-05-14 16:36:07,770 INFO wandb-upload_0:125099 [upload_job.py:push():130] Uploaded file /tmp/tmpvudxjm0kwandb/f3chg6ww-wandb-metadata.json
42
+ 2024-05-14 16:36:08,025 INFO Thread-12 :125099 [dir_watcher.py:_on_file_created():271] file/dir created: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163606-als7uk9d/files/output.log
43
+ 2024-05-14 16:36:08,025 INFO Thread-12 :125099 [dir_watcher.py:_on_file_created():271] file/dir created: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163606-als7uk9d/files/requirements.txt
44
+ 2024-05-14 16:36:08,025 INFO Thread-12 :125099 [dir_watcher.py:_on_file_created():271] file/dir created: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163606-als7uk9d/files/wandb-metadata.json
45
+ 2024-05-14 16:36:10,025 INFO Thread-12 :125099 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163606-als7uk9d/files/output.log
46
+ 2024-05-14 16:36:11,912 DEBUG HandlerThread:125099 [handler.py:handle_request():158] handle_request: status_report
47
+ 2024-05-14 16:36:13,018 DEBUG SenderThread:125099 [sender.py:send():378] send: exit
48
+ 2024-05-14 16:36:13,018 INFO SenderThread:125099 [sender.py:send_exit():585] handling exit code: 1
49
+ 2024-05-14 16:36:13,019 INFO SenderThread:125099 [sender.py:send_exit():587] handling runtime: 5
50
+ 2024-05-14 16:36:13,019 INFO SenderThread:125099 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end
51
+ 2024-05-14 16:36:13,020 INFO SenderThread:125099 [sender.py:send_exit():593] send defer
52
+ 2024-05-14 16:36:13,020 DEBUG HandlerThread:125099 [handler.py:handle_request():158] handle_request: defer
53
+ 2024-05-14 16:36:13,020 INFO HandlerThread:125099 [handler.py:handle_request_defer():184] handle defer: 0
54
+ 2024-05-14 16:36:13,020 DEBUG SenderThread:125099 [sender.py:send_request():405] send_request: defer
55
+ 2024-05-14 16:36:13,020 INFO SenderThread:125099 [sender.py:send_request_defer():609] handle sender defer: 0
56
+ 2024-05-14 16:36:13,020 INFO SenderThread:125099 [sender.py:transition_state():613] send defer: 1
57
+ 2024-05-14 16:36:13,020 DEBUG HandlerThread:125099 [handler.py:handle_request():158] handle_request: defer
58
+ 2024-05-14 16:36:13,020 INFO HandlerThread:125099 [handler.py:handle_request_defer():184] handle defer: 1
59
+ 2024-05-14 16:36:13,020 DEBUG SenderThread:125099 [sender.py:send_request():405] send_request: defer
60
+ 2024-05-14 16:36:13,020 INFO SenderThread:125099 [sender.py:send_request_defer():609] handle sender defer: 1
61
+ 2024-05-14 16:36:13,020 INFO SenderThread:125099 [sender.py:transition_state():613] send defer: 2
62
+ 2024-05-14 16:36:13,020 DEBUG HandlerThread:125099 [handler.py:handle_request():158] handle_request: defer
63
+ 2024-05-14 16:36:13,020 INFO HandlerThread:125099 [handler.py:handle_request_defer():184] handle defer: 2
64
+ 2024-05-14 16:36:13,020 INFO HandlerThread:125099 [system_monitor.py:finish():203] Stopping system monitor
65
+ 2024-05-14 16:36:13,021 INFO HandlerThread:125099 [interfaces.py:finish():200] Joined cpu monitor
66
+ 2024-05-14 16:36:13,026 INFO HandlerThread:125099 [interfaces.py:finish():200] Joined disk monitor
67
+ 2024-05-14 16:36:13,026 DEBUG SystemMonitor:125099 [system_monitor.py:_start():172] Starting system metrics aggregation loop
68
+ 2024-05-14 16:36:13,026 INFO HandlerThread:125099 [interfaces.py:finish():200] Joined memory monitor
69
+ 2024-05-14 16:36:13,026 DEBUG SystemMonitor:125099 [system_monitor.py:_start():179] Finished system metrics aggregation loop
70
+ 2024-05-14 16:36:13,026 INFO HandlerThread:125099 [interfaces.py:finish():200] Joined network monitor
71
+ 2024-05-14 16:36:13,026 DEBUG SystemMonitor:125099 [system_monitor.py:_start():183] Publishing last batch of metrics
72
+ 2024-05-14 16:36:13,028 DEBUG SenderThread:125099 [sender.py:send_request():405] send_request: defer
73
+ 2024-05-14 16:36:13,028 INFO SenderThread:125099 [sender.py:send_request_defer():609] handle sender defer: 2
74
+ 2024-05-14 16:36:13,028 INFO SenderThread:125099 [sender.py:transition_state():613] send defer: 3
75
+ 2024-05-14 16:36:13,028 DEBUG SenderThread:125099 [sender.py:send():378] send: stats
76
+ 2024-05-14 16:36:13,029 DEBUG HandlerThread:125099 [handler.py:handle_request():158] handle_request: defer
77
+ 2024-05-14 16:36:13,029 INFO HandlerThread:125099 [handler.py:handle_request_defer():184] handle defer: 3
78
+ 2024-05-14 16:36:13,029 INFO Thread-12 :125099 [dir_watcher.py:_on_file_created():271] file/dir created: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163606-als7uk9d/files/wandb-summary.json
79
+ 2024-05-14 16:36:13,029 DEBUG SenderThread:125099 [sender.py:send_request():405] send_request: defer
80
+ 2024-05-14 16:36:13,029 INFO SenderThread:125099 [sender.py:send_request_defer():609] handle sender defer: 3
81
+ 2024-05-14 16:36:13,029 INFO SenderThread:125099 [sender.py:transition_state():613] send defer: 4
82
+ 2024-05-14 16:36:13,029 DEBUG HandlerThread:125099 [handler.py:handle_request():158] handle_request: defer
83
+ 2024-05-14 16:36:13,029 INFO HandlerThread:125099 [handler.py:handle_request_defer():184] handle defer: 4
84
+ 2024-05-14 16:36:13,029 DEBUG SenderThread:125099 [sender.py:send_request():405] send_request: defer
85
+ 2024-05-14 16:36:13,029 INFO SenderThread:125099 [sender.py:send_request_defer():609] handle sender defer: 4
86
+ 2024-05-14 16:36:13,029 INFO SenderThread:125099 [sender.py:transition_state():613] send defer: 5
87
+ 2024-05-14 16:36:13,029 DEBUG HandlerThread:125099 [handler.py:handle_request():158] handle_request: defer
88
+ 2024-05-14 16:36:13,029 INFO HandlerThread:125099 [handler.py:handle_request_defer():184] handle defer: 5
89
+ 2024-05-14 16:36:13,029 DEBUG SenderThread:125099 [sender.py:send():378] send: summary
90
+ 2024-05-14 16:36:13,030 INFO SenderThread:125099 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end
91
+ 2024-05-14 16:36:13,030 DEBUG SenderThread:125099 [sender.py:send_request():405] send_request: defer
92
+ 2024-05-14 16:36:13,030 INFO SenderThread:125099 [sender.py:send_request_defer():609] handle sender defer: 5
93
+ 2024-05-14 16:36:13,030 INFO SenderThread:125099 [sender.py:transition_state():613] send defer: 6
94
+ 2024-05-14 16:36:13,030 DEBUG HandlerThread:125099 [handler.py:handle_request():158] handle_request: defer
95
+ 2024-05-14 16:36:13,030 INFO HandlerThread:125099 [handler.py:handle_request_defer():184] handle defer: 6
96
+ 2024-05-14 16:36:13,031 DEBUG SenderThread:125099 [sender.py:send_request():405] send_request: defer
97
+ 2024-05-14 16:36:13,031 INFO SenderThread:125099 [sender.py:send_request_defer():609] handle sender defer: 6
98
+ 2024-05-14 16:36:13,033 DEBUG HandlerThread:125099 [handler.py:handle_request():158] handle_request: status_report
99
+ 2024-05-14 16:36:13,103 INFO SenderThread:125099 [sender.py:transition_state():613] send defer: 7
100
+ 2024-05-14 16:36:13,103 DEBUG HandlerThread:125099 [handler.py:handle_request():158] handle_request: defer
101
+ 2024-05-14 16:36:13,103 INFO HandlerThread:125099 [handler.py:handle_request_defer():184] handle defer: 7
102
+ 2024-05-14 16:36:13,103 DEBUG SenderThread:125099 [sender.py:send_request():405] send_request: defer
103
+ 2024-05-14 16:36:13,103 INFO SenderThread:125099 [sender.py:send_request_defer():609] handle sender defer: 7
104
+ 2024-05-14 16:36:13,530 INFO SenderThread:125099 [sender.py:transition_state():613] send defer: 8
105
+ 2024-05-14 16:36:13,530 DEBUG HandlerThread:125099 [handler.py:handle_request():158] handle_request: defer
106
+ 2024-05-14 16:36:13,530 INFO HandlerThread:125099 [handler.py:handle_request_defer():184] handle defer: 8
107
+ 2024-05-14 16:36:13,531 DEBUG SenderThread:125099 [sender.py:send_request():405] send_request: defer
108
+ 2024-05-14 16:36:13,531 INFO SenderThread:125099 [sender.py:send_request_defer():609] handle sender defer: 8
109
+ 2024-05-14 16:36:13,531 INFO SenderThread:125099 [job_builder.py:build():432] Attempting to build job artifact
110
+ 2024-05-14 16:36:13,531 INFO SenderThread:125099 [job_builder.py:_get_source_type():576] no source found
111
+ 2024-05-14 16:36:13,531 INFO SenderThread:125099 [sender.py:transition_state():613] send defer: 9
112
+ 2024-05-14 16:36:13,531 DEBUG HandlerThread:125099 [handler.py:handle_request():158] handle_request: defer
113
+ 2024-05-14 16:36:13,531 INFO HandlerThread:125099 [handler.py:handle_request_defer():184] handle defer: 9
114
+ 2024-05-14 16:36:13,531 DEBUG SenderThread:125099 [sender.py:send_request():405] send_request: defer
115
+ 2024-05-14 16:36:13,531 INFO SenderThread:125099 [sender.py:send_request_defer():609] handle sender defer: 9
116
+ 2024-05-14 16:36:13,531 INFO SenderThread:125099 [dir_watcher.py:finish():358] shutting down directory watcher
117
+ 2024-05-14 16:36:14,019 DEBUG HandlerThread:125099 [handler.py:handle_request():158] handle_request: poll_exit
118
+ 2024-05-14 16:36:14,029 INFO SenderThread:125099 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163606-als7uk9d/files/output.log
119
+ 2024-05-14 16:36:14,030 INFO SenderThread:125099 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163606-als7uk9d/files/wandb-summary.json
120
+ 2024-05-14 16:36:14,030 INFO SenderThread:125099 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163606-als7uk9d/files/config.yaml
121
+ 2024-05-14 16:36:14,030 INFO SenderThread:125099 [dir_watcher.py:finish():388] scan: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163606-als7uk9d/files
122
+ 2024-05-14 16:36:14,030 INFO SenderThread:125099 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163606-als7uk9d/files/requirements.txt requirements.txt
123
+ 2024-05-14 16:36:14,030 INFO SenderThread:125099 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163606-als7uk9d/files/wandb-summary.json wandb-summary.json
124
+ 2024-05-14 16:36:14,030 INFO SenderThread:125099 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163606-als7uk9d/files/wandb-metadata.json wandb-metadata.json
125
+ 2024-05-14 16:36:14,030 INFO SenderThread:125099 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163606-als7uk9d/files/config.yaml config.yaml
126
+ 2024-05-14 16:36:14,030 INFO SenderThread:125099 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163606-als7uk9d/files/output.log output.log
127
+ 2024-05-14 16:36:14,030 INFO SenderThread:125099 [sender.py:transition_state():613] send defer: 10
128
+ 2024-05-14 16:36:14,030 DEBUG SenderThread:125099 [sender.py:send_request():405] send_request: poll_exit
129
+ 2024-05-14 16:36:14,030 DEBUG HandlerThread:125099 [handler.py:handle_request():158] handle_request: defer
130
+ 2024-05-14 16:36:14,032 INFO HandlerThread:125099 [handler.py:handle_request_defer():184] handle defer: 10
131
+ 2024-05-14 16:36:14,033 DEBUG SenderThread:125099 [sender.py:send_request():405] send_request: defer
132
+ 2024-05-14 16:36:14,034 INFO SenderThread:125099 [sender.py:send_request_defer():609] handle sender defer: 10
133
+ 2024-05-14 16:36:14,034 INFO SenderThread:125099 [file_pusher.py:finish():169] shutting down file pusher
134
+ 2024-05-14 16:36:14,273 INFO wandb-upload_1:125099 [upload_job.py:push():130] Uploaded file /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163606-als7uk9d/files/wandb-summary.json
135
+ 2024-05-14 16:36:14,425 INFO wandb-upload_0:125099 [upload_job.py:push():130] Uploaded file /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163606-als7uk9d/files/requirements.txt
136
+ 2024-05-14 16:36:14,499 INFO wandb-upload_3:125099 [upload_job.py:push():130] Uploaded file /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163606-als7uk9d/files/output.log
137
+ 2024-05-14 16:36:14,503 INFO wandb-upload_2:125099 [upload_job.py:push():130] Uploaded file /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163606-als7uk9d/files/config.yaml
138
+ 2024-05-14 16:36:14,704 INFO Thread-11 (_thread_body):125099 [sender.py:transition_state():613] send defer: 11
139
+ 2024-05-14 16:36:14,704 DEBUG HandlerThread:125099 [handler.py:handle_request():158] handle_request: defer
140
+ 2024-05-14 16:36:14,704 INFO HandlerThread:125099 [handler.py:handle_request_defer():184] handle defer: 11
141
+ 2024-05-14 16:36:14,705 DEBUG SenderThread:125099 [sender.py:send_request():405] send_request: defer
142
+ 2024-05-14 16:36:14,705 INFO SenderThread:125099 [sender.py:send_request_defer():609] handle sender defer: 11
143
+ 2024-05-14 16:36:14,705 INFO SenderThread:125099 [file_pusher.py:join():175] waiting for file pusher
144
+ 2024-05-14 16:36:14,705 INFO SenderThread:125099 [sender.py:transition_state():613] send defer: 12
145
+ 2024-05-14 16:36:14,705 DEBUG HandlerThread:125099 [handler.py:handle_request():158] handle_request: defer
146
+ 2024-05-14 16:36:14,705 INFO HandlerThread:125099 [handler.py:handle_request_defer():184] handle defer: 12
147
+ 2024-05-14 16:36:14,705 DEBUG SenderThread:125099 [sender.py:send_request():405] send_request: defer
148
+ 2024-05-14 16:36:14,705 INFO SenderThread:125099 [sender.py:send_request_defer():609] handle sender defer: 12
149
+ 2024-05-14 16:36:14,705 INFO SenderThread:125099 [file_stream.py:finish():601] file stream finish called
150
+ 2024-05-14 16:36:14,949 INFO SenderThread:125099 [file_stream.py:finish():605] file stream finish is done
151
+ 2024-05-14 16:36:14,949 INFO SenderThread:125099 [sender.py:transition_state():613] send defer: 13
152
+ 2024-05-14 16:36:14,949 DEBUG HandlerThread:125099 [handler.py:handle_request():158] handle_request: defer
153
+ 2024-05-14 16:36:14,949 INFO HandlerThread:125099 [handler.py:handle_request_defer():184] handle defer: 13
154
+ 2024-05-14 16:36:14,949 DEBUG SenderThread:125099 [sender.py:send_request():405] send_request: defer
155
+ 2024-05-14 16:36:14,949 INFO SenderThread:125099 [sender.py:send_request_defer():609] handle sender defer: 13
156
+ 2024-05-14 16:36:14,949 INFO SenderThread:125099 [sender.py:transition_state():613] send defer: 14
157
+ 2024-05-14 16:36:14,949 DEBUG HandlerThread:125099 [handler.py:handle_request():158] handle_request: defer
158
+ 2024-05-14 16:36:14,949 DEBUG SenderThread:125099 [sender.py:send():378] send: final
159
+ 2024-05-14 16:36:14,949 INFO HandlerThread:125099 [handler.py:handle_request_defer():184] handle defer: 14
160
+ 2024-05-14 16:36:14,949 DEBUG SenderThread:125099 [sender.py:send():378] send: footer
161
+ 2024-05-14 16:36:14,950 DEBUG SenderThread:125099 [sender.py:send_request():405] send_request: defer
162
+ 2024-05-14 16:36:14,950 INFO SenderThread:125099 [sender.py:send_request_defer():609] handle sender defer: 14
163
+ 2024-05-14 16:36:14,950 DEBUG HandlerThread:125099 [handler.py:handle_request():158] handle_request: poll_exit
164
+ 2024-05-14 16:36:14,950 DEBUG SenderThread:125099 [sender.py:send_request():405] send_request: poll_exit
165
+ 2024-05-14 16:36:14,950 DEBUG HandlerThread:125099 [handler.py:handle_request():158] handle_request: poll_exit
166
+ 2024-05-14 16:36:14,950 DEBUG SenderThread:125099 [sender.py:send_request():405] send_request: poll_exit
167
+ 2024-05-14 16:36:14,951 DEBUG HandlerThread:125099 [handler.py:handle_request():158] handle_request: server_info
168
+ 2024-05-14 16:36:14,951 DEBUG SenderThread:125099 [sender.py:send_request():405] send_request: server_info
169
+ 2024-05-14 16:36:14,952 DEBUG HandlerThread:125099 [handler.py:handle_request():158] handle_request: get_summary
170
+ 2024-05-14 16:36:14,952 DEBUG HandlerThread:125099 [handler.py:handle_request():158] handle_request: sampled_history
171
+ 2024-05-14 16:36:14,952 DEBUG HandlerThread:125099 [handler.py:handle_request():158] handle_request: internal_messages
172
+ 2024-05-14 16:36:15,011 INFO MainThread:125099 [wandb_run.py:_footer_history_summary_info():3994] rendering history
173
+ 2024-05-14 16:36:15,012 INFO MainThread:125099 [wandb_run.py:_footer_history_summary_info():4026] rendering summary
174
+ 2024-05-14 16:36:15,012 INFO MainThread:125099 [wandb_run.py:_footer_sync_info():3953] logging synced files
175
+ 2024-05-14 16:36:15,012 DEBUG HandlerThread:125099 [handler.py:handle_request():158] handle_request: shutdown
176
+ 2024-05-14 16:36:15,012 INFO HandlerThread:125099 [handler.py:finish():882] shutting down handler
177
+ 2024-05-14 16:36:15,951 INFO WriterThread:125099 [datastore.py:close():296] close: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163606-als7uk9d/run-als7uk9d.wandb
178
+ 2024-05-14 16:36:16,011 INFO SenderThread:125099 [sender.py:finish():1545] shutting down sender
179
+ 2024-05-14 16:36:16,011 INFO SenderThread:125099 [file_pusher.py:finish():169] shutting down file pusher
180
+ 2024-05-14 16:36:16,011 INFO SenderThread:125099 [file_pusher.py:join():175] waiting for file pusher
lm-evaluation-harness/wandb/run-20240523_080306-xred40sp/files/config.yaml ADDED
@@ -0,0 +1,43 @@
1
+ wandb_version: 1
2
+
3
+ _wandb:
4
+ desc: null
5
+ value:
6
+ python_version: 3.10.12
7
+ cli_version: 0.17.0
8
+ framework: huggingface
9
+ huggingface_version: 4.41.1
10
+ is_jupyter_run: false
11
+ is_kaggle_kernel: false
12
+ start_time: 1716451386
13
+ t:
14
+ 1:
15
+ - 1
16
+ - 5
17
+ - 11
18
+ - 49
19
+ - 51
20
+ - 53
21
+ - 55
22
+ - 71
23
+ - 98
24
+ - 100
25
+ 2:
26
+ - 1
27
+ - 5
28
+ - 11
29
+ - 49
30
+ - 51
31
+ - 53
32
+ - 55
33
+ - 71
34
+ - 98
35
+ - 100
36
+ 3:
37
+ - 23
38
+ 4: 3.10.12
39
+ 5: 0.17.0
40
+ 6: 4.41.1
41
+ 8:
42
+ - 5
43
+ 13: linux-x86_64
lm-evaluation-harness/wandb/run-20240523_080306-xred40sp/files/output.log ADDED
@@ -0,0 +1,34 @@
1
+
2
+ 2024-05-23:08:03:06,984 INFO [__main__.py:251] Verbosity set to INFO
3
+ 2024-05-23:08:03:16,143 INFO [__main__.py:335] Selected Tasks: ['arc_easy', 'hellaswag', 'mrpc', 'openbookqa', 'sst2', 'winogrande']
4
+ 2024-05-23:08:03:16,144 INFO [evaluator.py:131] Setting random seed to 0 | Setting numpy seed to 1234 | Setting torch manual seed to 1234
5
+ 2024-05-23:08:03:16,144 INFO [evaluator.py:177] Initializing hf model, with arguments: {'pretrained': '/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step28000'}
6
+ 2024-05-23:08:03:18,451 INFO [huggingface.py:164] Using device 'cuda'
7
+ Traceback (most recent call last):
8
+ File "/usr/lib/python3.10/runpy.py", line 196, in _run_module_as_main
9
+ return _run_code(code, main_globals, None,
10
+ File "/usr/lib/python3.10/runpy.py", line 86, in _run_code
11
+ exec(code, run_globals)
12
+ File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 417, in <module>
13
+ cli_evaluate()
14
+ File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 341, in cli_evaluate
15
+ results = evaluator.simple_evaluate(
16
+ File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/utils.py", line 288, in _wrapper
17
+ return fn(*args, **kwargs)
18
+ File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/evaluator.py", line 180, in simple_evaluate
19
+ lm = lm_eval.api.registry.get_model(model).create_from_arg_string(
20
+ File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/api/model.py", line 134, in create_from_arg_string
21
+ return cls(**args, **args2)
22
+ File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 190, in __init__
23
+ self._get_config(
24
+ File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 471, in _get_config
25
+ self._config = transformers.AutoConfig.from_pretrained(
26
+ File "/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py", line 934, in from_pretrained
27
+ config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs)
28
+ File "/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 632, in get_config_dict
29
+ config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs)
30
+ File "/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 689, in _get_config_dict
31
+ resolved_config_file = cached_file(
32
+ File "/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py", line 370, in cached_file
33
+ raise EnvironmentError(
34
+ OSError: /mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step28000 does not appear to have a file named config.json. Checkout 'https://huggingface.co//mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step28000/tree/main' for available files.
lm-evaluation-harness/wandb/run-20240523_080306-xred40sp/files/requirements.txt ADDED
@@ -0,0 +1,155 @@
1
+ DataProperty==1.0.1
2
+ GitPython==3.1.43
3
+ Jinja2==3.1.4
4
+ Markdown==3.6
5
+ MarkupSafe==2.1.5
6
+ Pillow-SIMD==7.0.0.post3
7
+ PyYAML==6.0
8
+ Werkzeug==3.0.3
9
+ absl-py==2.1.0
10
+ accelerate==0.30.1
11
+ aiohttp==3.9.5
12
+ aiosignal==1.3.1
13
+ async-timeout==4.0.3
14
+ attrs==23.2.0
15
+ av==9.2.0
16
+ cachetools==5.3.3
17
+ certifi==2024.2.2
18
+ cffi==1.15.1
19
+ cfgv==3.4.0
20
+ chardet==5.2.0
21
+ charset-normalizer==3.3.2
22
+ click==8.1.7
23
+ cmake==3.29.2
24
+ colorama==0.4.6
25
+ datasets==2.19.1
26
+ deepspeed==0.12.4+hpu.synapse.v1.15.1
27
+ dill==0.3.8
28
+ distlib==0.3.8
29
+ docker-pycreds==0.4.0
30
+ einops==0.8.0
31
+ evaluate==0.4.2
32
+ exceptiongroup==1.2.1
33
+ expecttest==0.2.1
34
+ filelock==3.14.0
35
+ frozenlist==1.4.1
36
+ fsspec==2024.3.1
37
+ gitdb==4.0.11
38
+ google-auth-oauthlib==0.4.6
39
+ google-auth==2.29.0
40
+ grpcio==1.63.0
41
+ habana-media-loader==1.15.1.15
42
+ habana-pyhlml==1.15.1.15
43
+ habana-torch-dataloader==1.15.1.15
44
+ habana-torch-plugin==1.15.1.15
45
+ habana_gpu_migration==1.15.1.15
46
+ habana_quantization_toolkit==1.15.1.15
47
+ hjson==3.1.0
48
+ huggingface-hub==0.23.1
49
+ identify==2.5.36
50
+ idna==3.7
51
+ iniconfig==2.0.0
52
+ joblib==1.4.2
53
+ jsonlines==4.0.0
54
+ lightning-habana==1.4.0
55
+ lightning-utilities==0.11.2
56
+ lightning==2.2.0.post0
57
+ lm_eval==0.4.2
58
+ lm_eval==0.4.2
59
+ lm_eval==0.4.2
60
+ lxml==5.2.2
61
+ mbstrdecoder==1.1.3
62
+ more-itertools==10.2.0
63
+ mpi4py==3.1.4
64
+ mpmath==1.3.0
65
+ multidict==6.0.5
66
+ multiprocess==0.70.16
67
+ networkx==3.3
68
+ ninja==1.11.1.1
69
+ nltk==3.8.1
70
+ nodeenv==1.8.0
71
+ numexpr==2.10.0
72
+ numpy==1.23.5
73
+ oauthlib==3.2.2
74
+ packaging==24.0
75
+ pandas==2.0.1
76
+ pathspec==0.12.1
77
+ pathvalidate==3.2.0
78
+ peft==0.11.1
79
+ perfetto==0.7.0
80
+ pillow==10.3.0
81
+ pip==22.0.2
82
+ pip==23.3.1
83
+ platformdirs==4.2.1
84
+ pluggy==1.5.0
85
+ portalocker==2.8.2
86
+ pre-commit==3.3.3
87
+ pretty-errors==1.2.25
88
+ protobuf==3.20.3
89
+ psutil==5.9.8
90
+ py-cpuinfo==9.0.0
91
+ pyarrow-hotfix==0.6
92
+ pyarrow==16.1.0
93
+ pyasn1==0.6.0
94
+ pyasn1_modules==0.4.0
95
+ pybind11==2.10.4
96
+ pycparser==2.22
97
+ pydantic==1.10.13
98
+ pynvml==8.0.4
99
+ pytablewriter==1.2.0
100
+ pytest==8.2.0
101
+ python-dateutil==2.9.0.post0
102
+ pytorch-lightning==2.2.4
103
+ pytz==2024.1
104
+ regex==2023.5.5
105
+ requests-oauthlib==2.0.0
106
+ requests==2.31.0
107
+ rouge_score==0.1.2
108
+ rsa==4.9
109
+ sacrebleu==2.4.2
110
+ safetensors==0.4.3
111
+ scikit-learn==1.5.0
112
+ scipy==1.13.1
113
+ sentencepiece==0.2.0
114
+ sentry-sdk==2.2.1
115
+ setproctitle==1.3.3
116
+ setuptools==59.6.0
117
+ setuptools==69.5.1
118
+ six==1.16.0
119
+ smmap==5.0.1
120
+ sqlitedict==2.1.0
121
+ symengine==0.11.0
122
+ sympy==1.12
123
+ tabledata==1.3.3
124
+ tabulate==0.9.0
125
+ tcolorpy==0.1.6
126
+ tdqm==0.0.1
127
+ tensorboard-data-server==0.6.1
128
+ tensorboard-plugin-wit==1.8.1
129
+ tensorboard==2.11.2
130
+ threadpoolctl==3.5.0
131
+ tokenizers==0.19.1
132
+ tomli==2.0.1
133
+ torch==2.2.0a0+git8964477
134
+ torch_tb_profiler==0.4.0
135
+ torchaudio==2.2.0+08901ad
136
+ torchdata==0.7.1+5e6f7b7
137
+ torchmetrics==1.4.0
138
+ torchtext==0.17.0+400da5c
139
+ torchvision==0.17.0+b2383d4
140
+ tqdm-multiprocess==0.0.11
141
+ tqdm==4.66.4
142
+ transformers==4.41.1
143
+ typepy==1.3.2
144
+ typing_extensions==4.11.0
145
+ tzdata==2024.1
146
+ urllib3==1.26.18
147
+ virtualenv==20.26.1
148
+ wandb==0.17.0
149
+ wheel==0.37.1
150
+ wheel==0.43.0
151
+ word2number==1.1
152
+ xxhash==3.4.1
153
+ yamllint==1.35.1
154
+ yarl==1.9.4
155
+ zstandard==0.22.0
lm-evaluation-harness/wandb/run-20240523_080306-xred40sp/files/wandb-metadata.json ADDED
@@ -0,0 +1,850 @@
1
+ {
2
+ "os": "Linux-5.15.0-92-generic-x86_64-with-glibc2.35",
3
+ "python": "3.10.12",
4
+ "heartbeatAt": "2024-05-23T08:03:06.784564",
5
+ "startedAt": "2024-05-23T08:03:06.286307",
6
+ "docker": null,
7
+ "cuda": null,
8
+ "args": [
9
+ "--model",
10
+ "hf",
11
+ "--model_args",
12
+ "pretrained=/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step28000",
13
+ "--tasks",
14
+ "hellaswag,arc_easy,openbookqa,winogrande,sst2,mrpc",
15
+ "--batch_size",
16
+ "auto",
17
+ "--wandb_args",
18
+ "project=bharatgpt,group=trial_expt_2"
19
+ ],
20
+ "state": "running",
21
+ "program": "-m lm_eval.__main__",
22
+ "codePathLocal": null,
23
+ "git": {
24
+ "remote": "https://github.com/EleutherAI/lm-evaluation-harness",
25
+ "commit": null
26
+ },
27
+ "email": null,
28
+ "root": "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness",
29
+ "host": "peacock-evaluation-worker-0",
30
+ "username": "root",
31
+ "executable": "/usr/bin/python3",
32
+ "cpu_count": 80,
33
+ "cpu_count_logical": 160,
34
+ "cpu_freq": {
35
+ "current": 2327.5,
36
+ "min": 800.0,
37
+ "max": 3400.0
38
+ },
39
+ "cpu_freq_per_core": [
40
+ {
41
+ "current": 3400.0,
42
+ "min": 800.0,
43
+ "max": 3400.0
44
+ },
45
+ {
46
+ "current": 3400.0,
47
+ "min": 800.0,
48
+ "max": 3400.0
49
+ },
50
+ {
51
+ "current": 2300.0,
52
+ "min": 800.0,
53
+ "max": 3400.0
54
+ },
55
+ {
56
+ "current": 2300.0,
57
+ "min": 800.0,
58
+ "max": 3400.0
59
+ },
60
+ {
61
+ "current": 2300.0,
62
+ "min": 800.0,
63
+ "max": 3400.0
64
+ },
65
+ {
66
+ "current": 2300.0,
67
+ "min": 800.0,
68
+ "max": 3400.0
69
+ },
70
+ {
71
+ "current": 2300.0,
72
+ "min": 800.0,
73
+ "max": 3400.0
74
+ },
75
+ {
76
+ "current": 2300.0,
77
+ "min": 800.0,
78
+ "max": 3400.0
79
+ },
80
+ {
81
+ "current": 2300.0,
82
+ "min": 800.0,
83
+ "max": 3400.0
84
+ },
85
+ {
86
+ "current": 2300.0,
87
+ "min": 800.0,
88
+ "max": 3400.0
89
+ },
90
+ {
91
+ "current": 2300.0,
92
+ "min": 800.0,
93
+ "max": 3400.0
94
+ },
95
+ {
96
+ "current": 2300.0,
97
+ "min": 800.0,
98
+ "max": 3400.0
99
+ },
100
+ {
101
+ "current": 2300.0,
102
+ "min": 800.0,
103
+ "max": 3400.0
104
+ },
105
+ {
106
+ "current": 2300.0,
107
+ "min": 800.0,
108
+ "max": 3400.0
109
+ },
110
+ {
111
+ "current": 2300.0,
112
+ "min": 800.0,
113
+ "max": 3400.0
114
+ },
115
+ {
116
+ "current": 2300.0,
117
+ "min": 800.0,
118
+ "max": 3400.0
119
+ },
120
+ {
121
+ "current": 2300.0,
122
+ "min": 800.0,
123
+ "max": 3400.0
124
+ },
125
+ {
126
+ "current": 2300.0,
127
+ "min": 800.0,
128
+ "max": 3400.0
129
+ },
130
+ {
131
+ "current": 2300.0,
132
+ "min": 800.0,
133
+ "max": 3400.0
134
+ },
135
+ {
136
+ "current": 2300.0,
137
+ "min": 800.0,
138
+ "max": 3400.0
139
+ },
140
+ {
141
+ "current": 2300.0,
142
+ "min": 800.0,
143
+ "max": 3400.0
144
+ },
145
+ {
146
+ "current": 2300.0,
147
+ "min": 800.0,
148
+ "max": 3400.0
149
+ },
150
+ {
151
+ "current": 2300.0,
152
+ "min": 800.0,
153
+ "max": 3400.0
154
+ },
155
+ {
156
+ "current": 2300.0,
157
+ "min": 800.0,
158
+ "max": 3400.0
159
+ },
160
+ {
161
+ "current": 2300.0,
162
+ "min": 800.0,
163
+ "max": 3400.0
164
+ },
165
+ {
166
+ "current": 2300.0,
167
+ "min": 800.0,
168
+ "max": 3400.0
169
+ },
170
+ {
171
+ "current": 2300.0,
172
+ "min": 800.0,
173
+ "max": 3400.0
174
+ },
175
+ {
176
+ "current": 2300.0,
177
+ "min": 800.0,
178
+ "max": 3400.0
179
+ },
180
+ {
181
+ "current": 2300.0,
182
+ "min": 800.0,
183
+ "max": 3400.0
184
+ },
185
+ {
186
+ "current": 2300.0,
187
+ "min": 800.0,
188
+ "max": 3400.0
189
+ },
190
+ {
191
+ "current": 2300.0,
192
+ "min": 800.0,
193
+ "max": 3400.0
194
+ },
195
+ {
196
+ "current": 2300.0,
197
+ "min": 800.0,
198
+ "max": 3400.0
199
+ },
200
+ {
201
+ "current": 2300.0,
202
+ "min": 800.0,
203
+ "max": 3400.0
204
+ },
205
+ {
206
+ "current": 2300.0,
207
+ "min": 800.0,
208
+ "max": 3400.0
209
+ },
210
+ {
211
+ "current": 2300.0,
212
+ "min": 800.0,
213
+ "max": 3400.0
214
+ },
215
+ {
216
+ "current": 2300.0,
217
+ "min": 800.0,
218
+ "max": 3400.0
219
+ },
220
+ {
221
+ "current": 2300.0,
222
+ "min": 800.0,
223
+ "max": 3400.0
224
+ },
225
+ {
226
+ "current": 2300.0,
227
+ "min": 800.0,
228
+ "max": 3400.0
229
+ },
230
+ {
231
+ "current": 2300.0,
232
+ "min": 800.0,
233
+ "max": 3400.0
234
+ },
235
+ {
236
+ "current": 2300.0,
237
+ "min": 800.0,
238
+ "max": 3400.0
239
+ },
240
+ {
241
+ "current": 3400.0,
242
+ "min": 800.0,
243
+ "max": 3400.0
244
+ },
245
+ {
246
+ "current": 2300.0,
247
+ "min": 800.0,
248
+ "max": 3400.0
249
+ },
250
+ {
251
+ "current": 2300.0,
252
+ "min": 800.0,
253
+ "max": 3400.0
254
+ },
255
+ {
256
+ "current": 2300.0,
257
+ "min": 800.0,
258
+ "max": 3400.0
259
+ },
260
+ {
261
+ "current": 2300.0,
262
+ "min": 800.0,
263
+ "max": 3400.0
264
+ },
265
+ {
266
+ "current": 2300.0,
267
+ "min": 800.0,
268
+ "max": 3400.0
269
+ },
270
+ {
271
+ "current": 2300.0,
272
+ "min": 800.0,
273
+ "max": 3400.0
274
+ },
275
+ {
276
+ "current": 2300.0,
277
+ "min": 800.0,
278
+ "max": 3400.0
279
+ },
280
+ {
281
+ "current": 2300.0,
282
+ "min": 800.0,
283
+ "max": 3400.0
284
+ },
285
+ {
286
+ "current": 2300.0,
287
+ "min": 800.0,
288
+ "max": 3400.0
289
+ },
290
+ {
291
+ "current": 2300.0,
292
+ "min": 800.0,
293
+ "max": 3400.0
294
+ },
295
+ {
296
+ "current": 2300.0,
297
+ "min": 800.0,
298
+ "max": 3400.0
299
+ },
300
+ {
301
+ "current": 2300.0,
302
+ "min": 800.0,
303
+ "max": 3400.0
304
+ },
305
+ {
306
+ "current": 2300.0,
307
+ "min": 800.0,
308
+ "max": 3400.0
309
+ },
310
+ {
311
+ "current": 2300.0,
312
+ "min": 800.0,
313
+ "max": 3400.0
314
+ },
315
+ {
316
+ "current": 2300.0,
317
+ "min": 800.0,
318
+ "max": 3400.0
319
+ },
320
+ {
321
+ "current": 2300.0,
322
+ "min": 800.0,
323
+ "max": 3400.0
324
+ },
325
+ {
326
+ "current": 2300.0,
327
+ "min": 800.0,
328
+ "max": 3400.0
329
+ },
330
+ {
331
+ "current": 2300.0,
332
+ "min": 800.0,
333
+ "max": 3400.0
334
+ },
335
+ {
336
+ "current": 2300.0,
337
+ "min": 800.0,
338
+ "max": 3400.0
339
+ },
340
+ {
341
+ "current": 2300.0,
342
+ "min": 800.0,
343
+ "max": 3400.0
344
+ },
345
+ {
346
+ "current": 2300.0,
347
+ "min": 800.0,
348
+ "max": 3400.0
349
+ },
350
+ {
351
+ "current": 2300.0,
352
+ "min": 800.0,
353
+ "max": 3400.0
354
+ },
355
+ {
356
+ "current": 2300.0,
357
+ "min": 800.0,
358
+ "max": 3400.0
359
+ },
360
+ {
361
+ "current": 2300.0,
362
+ "min": 800.0,
363
+ "max": 3400.0
364
+ },
365
+ {
366
+ "current": 2300.0,
367
+ "min": 800.0,
368
+ "max": 3400.0
369
+ },
370
+ {
371
+ "current": 2300.0,
372
+ "min": 800.0,
373
+ "max": 3400.0
374
+ },
375
+ {
376
+ "current": 2300.0,
377
+ "min": 800.0,
378
+ "max": 3400.0
379
+ },
380
+ {
381
+ "current": 2300.0,
382
+ "min": 800.0,
383
+ "max": 3400.0
384
+ },
385
+ {
386
+ "current": 2300.0,
387
+ "min": 800.0,
388
+ "max": 3400.0
389
+ },
390
+ {
391
+ "current": 2300.0,
392
+ "min": 800.0,
393
+ "max": 3400.0
394
+ },
395
+ {
396
+ "current": 2300.0,
397
+ "min": 800.0,
398
+ "max": 3400.0
399
+ },
400
+ {
401
+ "current": 2300.0,
402
+ "min": 800.0,
403
+ "max": 3400.0
404
+ },
405
+ {
406
+ "current": 2300.0,
407
+ "min": 800.0,
408
+ "max": 3400.0
409
+ },
410
+ {
411
+ "current": 2300.0,
412
+ "min": 800.0,
413
+ "max": 3400.0
414
+ },
415
+ {
416
+ "current": 2300.0,
417
+ "min": 800.0,
418
+ "max": 3400.0
419
+ },
420
+ {
421
+ "current": 2300.0,
422
+ "min": 800.0,
423
+ "max": 3400.0
424
+ },
425
+ {
426
+ "current": 2300.0,
427
+ "min": 800.0,
428
+ "max": 3400.0
429
+ },
430
+ {
431
+ "current": 2300.0,
432
+ "min": 800.0,
433
+ "max": 3400.0
434
+ },
435
+ {
436
+ "current": 2300.0,
437
+ "min": 800.0,
438
+ "max": 3400.0
439
+ },
440
+ {
441
+ "current": 3400.0,
442
+ "min": 800.0,
443
+ "max": 3400.0
444
+ },
445
+ {
446
+ "current": 2300.0,
447
+ "min": 800.0,
448
+ "max": 3400.0
449
+ },
450
+ {
451
+ "current": 2300.0,
452
+ "min": 800.0,
453
+ "max": 3400.0
454
+ },
455
+ {
456
+ "current": 2300.0,
457
+ "min": 800.0,
458
+ "max": 3400.0
459
+ },
460
+ {
461
+ "current": 2300.0,
462
+ "min": 800.0,
463
+ "max": 3400.0
464
+ },
465
+ {
466
+ "current": 2300.0,
467
+ "min": 800.0,
468
+ "max": 3400.0
469
+ },
470
+ {
471
+ "current": 2300.0,
472
+ "min": 800.0,
473
+ "max": 3400.0
474
+ },
475
+ {
476
+ "current": 2300.0,
477
+ "min": 800.0,
478
+ "max": 3400.0
479
+ },
480
+ {
481
+ "current": 2300.0,
482
+ "min": 800.0,
483
+ "max": 3400.0
484
+ },
485
+ {
486
+ "current": 2300.0,
487
+ "min": 800.0,
488
+ "max": 3400.0
489
+ },
490
+ {
491
+ "current": 2300.0,
492
+ "min": 800.0,
493
+ "max": 3400.0
494
+ },
495
+ {
496
+ "current": 2300.0,
497
+ "min": 800.0,
498
+ "max": 3400.0
499
+ },
500
+ {
501
+ "current": 2300.0,
502
+ "min": 800.0,
503
+ "max": 3400.0
504
+ },
505
+ {
506
+ "current": 2300.0,
507
+ "min": 800.0,
508
+ "max": 3400.0
509
+ },
510
+ {
511
+ "current": 2300.0,
512
+ "min": 800.0,
513
+ "max": 3400.0
514
+ },
515
+ {
516
+ "current": 2300.0,
517
+ "min": 800.0,
518
+ "max": 3400.0
519
+ },
520
+ {
521
+ "current": 2300.0,
522
+ "min": 800.0,
523
+ "max": 3400.0
524
+ },
525
+ {
526
+ "current": 2300.0,
527
+ "min": 800.0,
528
+ "max": 3400.0
529
+ },
530
+ {
531
+ "current": 2300.0,
532
+ "min": 800.0,
533
+ "max": 3400.0
534
+ },
535
+ {
536
+ "current": 2300.0,
537
+ "min": 800.0,
538
+ "max": 3400.0
539
+ },
540
+ {
541
+ "current": 2300.0,
542
+ "min": 800.0,
543
+ "max": 3400.0
544
+ },
545
+ {
546
+ "current": 2300.0,
547
+ "min": 800.0,
548
+ "max": 3400.0
549
+ },
550
+ {
551
+ "current": 2300.0,
552
+ "min": 800.0,
553
+ "max": 3400.0
554
+ },
555
+ {
556
+ "current": 2300.0,
557
+ "min": 800.0,
558
+ "max": 3400.0
559
+ },
560
+ {
561
+ "current": 2300.0,
562
+ "min": 800.0,
563
+ "max": 3400.0
564
+ },
565
+ {
566
+ "current": 2300.0,
567
+ "min": 800.0,
568
+ "max": 3400.0
569
+ },
570
+ {
571
+ "current": 2300.0,
572
+ "min": 800.0,
573
+ "max": 3400.0
574
+ },
575
+ {
576
+ "current": 2300.0,
577
+ "min": 800.0,
578
+ "max": 3400.0
579
+ },
580
+ {
581
+ "current": 2300.0,
582
+ "min": 800.0,
583
+ "max": 3400.0
584
+ },
585
+ {
586
+ "current": 2300.0,
587
+ "min": 800.0,
588
+ "max": 3400.0
589
+ },
590
+ {
591
+ "current": 2300.0,
592
+ "min": 800.0,
593
+ "max": 3400.0
594
+ },
595
+ {
596
+ "current": 2300.0,
597
+ "min": 800.0,
598
+ "max": 3400.0
599
+ },
600
+ {
601
+ "current": 2300.0,
602
+ "min": 800.0,
603
+ "max": 3400.0
604
+ },
605
+ {
606
+ "current": 2300.0,
607
+ "min": 800.0,
608
+ "max": 3400.0
609
+ },
610
+ {
611
+ "current": 2300.0,
612
+ "min": 800.0,
613
+ "max": 3400.0
614
+ },
615
+ {
616
+ "current": 2300.0,
617
+ "min": 800.0,
618
+ "max": 3400.0
619
+ },
620
+ {
621
+ "current": 2300.0,
622
+ "min": 800.0,
623
+ "max": 3400.0
624
+ },
625
+ {
626
+ "current": 2300.0,
627
+ "min": 800.0,
628
+ "max": 3400.0
629
+ },
630
+ {
631
+ "current": 2300.0,
632
+ "min": 800.0,
633
+ "max": 3400.0
634
+ },
635
+ {
636
+ "current": 2300.0,
637
+ "min": 800.0,
638
+ "max": 3400.0
639
+ },
640
+ {
641
+ "current": 2300.0,
642
+ "min": 800.0,
643
+ "max": 3400.0
644
+ },
645
+ {
646
+ "current": 2300.0,
647
+ "min": 800.0,
648
+ "max": 3400.0
649
+ },
650
+ {
651
+ "current": 2300.0,
652
+ "min": 800.0,
653
+ "max": 3400.0
654
+ },
655
+ {
656
+ "current": 2300.0,
657
+ "min": 800.0,
658
+ "max": 3400.0
659
+ },
660
+ {
661
+ "current": 2300.0,
662
+ "min": 800.0,
663
+ "max": 3400.0
664
+ },
665
+ {
666
+ "current": 2300.0,
667
+ "min": 800.0,
668
+ "max": 3400.0
669
+ },
670
+ {
671
+ "current": 2300.0,
672
+ "min": 800.0,
673
+ "max": 3400.0
674
+ },
675
+ {
676
+ "current": 2300.0,
677
+ "min": 800.0,
678
+ "max": 3400.0
679
+ },
680
+ {
681
+ "current": 2300.0,
682
+ "min": 800.0,
683
+ "max": 3400.0
684
+ },
685
+ {
686
+ "current": 2300.0,
687
+ "min": 800.0,
688
+ "max": 3400.0
689
+ },
690
+ {
691
+ "current": 2300.0,
692
+ "min": 800.0,
693
+ "max": 3400.0
694
+ },
695
+ {
696
+ "current": 2300.0,
697
+ "min": 800.0,
698
+ "max": 3400.0
699
+ },
700
+ {
701
+ "current": 2300.0,
702
+ "min": 800.0,
703
+ "max": 3400.0
704
+ },
705
+ {
706
+ "current": 2300.0,
707
+ "min": 800.0,
708
+ "max": 3400.0
709
+ },
710
+ {
711
+ "current": 2300.0,
712
+ "min": 800.0,
713
+ "max": 3400.0
714
+ },
715
+ {
716
+ "current": 2300.0,
717
+ "min": 800.0,
718
+ "max": 3400.0
719
+ },
720
+ {
721
+ "current": 2300.0,
722
+ "min": 800.0,
723
+ "max": 3400.0
724
+ },
725
+ {
726
+ "current": 2300.0,
727
+ "min": 800.0,
728
+ "max": 3400.0
729
+ },
730
+ {
731
+ "current": 2300.0,
732
+ "min": 800.0,
733
+ "max": 3400.0
734
+ },
735
+ {
736
+ "current": 2300.0,
737
+ "min": 800.0,
738
+ "max": 3400.0
739
+ },
740
+ {
741
+ "current": 2300.0,
742
+ "min": 800.0,
743
+ "max": 3400.0
744
+ },
745
+ {
746
+ "current": 2300.0,
747
+ "min": 800.0,
748
+ "max": 3400.0
749
+ },
750
+ {
751
+ "current": 2300.0,
752
+ "min": 800.0,
753
+ "max": 3400.0
754
+ },
755
+ {
756
+ "current": 2300.0,
757
+ "min": 800.0,
758
+ "max": 3400.0
759
+ },
760
+ {
761
+ "current": 2300.0,
762
+ "min": 800.0,
763
+ "max": 3400.0
764
+ },
765
+ {
766
+ "current": 2300.0,
767
+ "min": 800.0,
768
+ "max": 3400.0
769
+ },
770
+ {
771
+ "current": 2300.0,
772
+ "min": 800.0,
773
+ "max": 3400.0
774
+ },
775
+ {
776
+ "current": 2300.0,
777
+ "min": 800.0,
778
+ "max": 3400.0
779
+ },
780
+ {
781
+ "current": 2300.0,
782
+ "min": 800.0,
783
+ "max": 3400.0
784
+ },
785
+ {
786
+ "current": 2300.0,
787
+ "min": 800.0,
788
+ "max": 3400.0
789
+ },
790
+ {
791
+ "current": 2300.0,
792
+ "min": 800.0,
793
+ "max": 3400.0
794
+ },
795
+ {
796
+ "current": 2300.0,
797
+ "min": 800.0,
798
+ "max": 3400.0
799
+ },
800
+ {
801
+ "current": 2300.0,
802
+ "min": 800.0,
803
+ "max": 3400.0
804
+ },
805
+ {
806
+ "current": 2300.0,
807
+ "min": 800.0,
808
+ "max": 3400.0
809
+ },
810
+ {
811
+ "current": 2300.0,
812
+ "min": 800.0,
813
+ "max": 3400.0
814
+ },
815
+ {
816
+ "current": 2300.0,
817
+ "min": 800.0,
818
+ "max": 3400.0
819
+ },
820
+ {
821
+ "current": 2300.0,
822
+ "min": 800.0,
823
+ "max": 3400.0
824
+ },
825
+ {
826
+ "current": 2300.0,
827
+ "min": 800.0,
828
+ "max": 3400.0
829
+ },
830
+ {
831
+ "current": 2300.0,
832
+ "min": 800.0,
833
+ "max": 3400.0
834
+ },
835
+ {
836
+ "current": 2300.0,
837
+ "min": 800.0,
838
+ "max": 3400.0
839
+ }
840
+ ],
841
+ "disk": {
842
+ "/": {
843
+ "total": 877.6341285705566,
844
+ "used": 211.64060974121094
845
+ }
846
+ },
847
+ "memory": {
848
+ "total": 1007.4379806518555
849
+ }
850
+ }
lm-evaluation-harness/wandb/run-20240523_080306-xred40sp/files/wandb-summary.json ADDED
@@ -0,0 +1 @@
1
+ {"_wandb": {"runtime": 11}}
lm-evaluation-harness/wandb/run-20240523_080306-xred40sp/logs/debug-internal.log ADDED
@@ -0,0 +1,185 @@
1
+ 2024-05-23 08:03:06,311 INFO StreamThr :3570 [internal.py:wandb_internal():85] W&B internal server running at pid: 3570, started at: 2024-05-23 08:03:06.306316
2
+ 2024-05-23 08:03:06,313 DEBUG HandlerThread:3570 [handler.py:handle_request():158] handle_request: status
3
+ 2024-05-23 08:03:06,314 INFO WriterThread:3570 [datastore.py:open_for_write():87] open: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_080306-xred40sp/run-xred40sp.wandb
4
+ 2024-05-23 08:03:06,318 DEBUG SenderThread:3570 [sender.py:send():378] send: header
5
+ 2024-05-23 08:03:06,318 DEBUG SenderThread:3570 [sender.py:send():378] send: run
6
+ 2024-05-23 08:03:06,574 INFO SenderThread:3570 [dir_watcher.py:__init__():211] watching files in: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_080306-xred40sp/files
7
+ 2024-05-23 08:03:06,574 INFO SenderThread:3570 [sender.py:_start_run_threads():1123] run started: xred40sp with start time 1716451386.306398
8
+ 2024-05-23 08:03:06,575 DEBUG HandlerThread:3570 [handler.py:handle_request():158] handle_request: check_version
9
+ 2024-05-23 08:03:06,576 DEBUG SenderThread:3570 [sender.py:send_request():405] send_request: check_version
10
+ 2024-05-23 08:03:06,690 DEBUG HandlerThread:3570 [handler.py:handle_request():158] handle_request: run_start
11
+ 2024-05-23 08:03:06,692 DEBUG HandlerThread:3570 [system_info.py:__init__():26] System info init
12
+ 2024-05-23 08:03:06,692 DEBUG HandlerThread:3570 [system_info.py:__init__():41] System info init done
13
+ 2024-05-23 08:03:06,692 INFO HandlerThread:3570 [system_monitor.py:start():194] Starting system monitor
14
+ 2024-05-23 08:03:06,692 INFO SystemMonitor:3570 [system_monitor.py:_start():158] Starting system asset monitoring threads
15
+ 2024-05-23 08:03:06,693 INFO HandlerThread:3570 [system_monitor.py:probe():214] Collecting system info
16
+ 2024-05-23 08:03:06,699 INFO SystemMonitor:3570 [interfaces.py:start():188] Started cpu monitoring
17
+ 2024-05-23 08:03:06,700 INFO SystemMonitor:3570 [interfaces.py:start():188] Started disk monitoring
18
+ 2024-05-23 08:03:06,700 INFO SystemMonitor:3570 [interfaces.py:start():188] Started memory monitoring
19
+ 2024-05-23 08:03:06,700 INFO SystemMonitor:3570 [interfaces.py:start():188] Started network monitoring
20
+ 2024-05-23 08:03:06,784 DEBUG HandlerThread:3570 [system_info.py:probe():150] Probing system
21
+ 2024-05-23 08:03:06,787 DEBUG HandlerThread:3570 [system_info.py:_probe_git():135] Probing git
22
+ 2024-05-23 08:03:06,797 ERROR HandlerThread:3570 [gitlib.py:root():92] git root error: Cmd('git') failed due to: exit code(128)
23
+ cmdline: git rev-parse --show-toplevel
24
+ stderr: 'fatal: detected dubious ownership in repository at '/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness'
25
+ To add an exception for this directory, call:
26
+
27
+ git config --global --add safe.directory /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness'
28
+ 2024-05-23 08:03:06,797 DEBUG HandlerThread:3570 [system_info.py:_probe_git():143] Probing git done
29
+ 2024-05-23 08:03:06,797 DEBUG HandlerThread:3570 [system_info.py:probe():198] Probing system done
30
+ 2024-05-23 08:03:06,797 DEBUG HandlerThread:3570 [system_monitor.py:probe():223] {'os': 'Linux-5.15.0-92-generic-x86_64-with-glibc2.35', 'python': '3.10.12', 'heartbeatAt': '2024-05-23T08:03:06.784564', 'startedAt': '2024-05-23T08:03:06.286307', 'docker': None, 'cuda': None, 'args': ('--model', 'hf', '--model_args', 'pretrained=/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step28000', '--tasks', 'hellaswag,arc_easy,openbookqa,winogrande,sst2,mrpc', '--batch_size', 'auto', '--wandb_args', 'project=bharatgpt,group=trial_expt_2'), 'state': 'running', 'program': '-m lm_eval.__main__', 'codePathLocal': None, 'git': {'remote': 'https://github.com/EleutherAI/lm-evaluation-harness', 'commit': None}, 'email': None, 'root': '/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness', 'host': 'peacock-evaluation-worker-0', 'username': 'root', 'executable': '/usr/bin/python3', 'cpu_count': 80, 'cpu_count_logical': 160, 'cpu_freq': {'current': 2327.5, 'min': 800.0, 'max': 3400.0}, 'cpu_freq_per_core': [{'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 
'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 
'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}], 'disk': {'/': {'total': 877.6341285705566, 'used': 211.64060974121094}}, 'memory': {'total': 1007.4379806518555}}
31
+ 2024-05-23 08:03:06,797 INFO HandlerThread:3570 [system_monitor.py:probe():224] Finished collecting system info
32
+ 2024-05-23 08:03:06,797 INFO HandlerThread:3570 [system_monitor.py:probe():227] Publishing system info
33
+ 2024-05-23 08:03:06,800 INFO HandlerThread:3570 [system_monitor.py:probe():229] Finished publishing system info
34
+ 2024-05-23 08:03:06,805 DEBUG SenderThread:3570 [sender.py:send():378] send: files
35
+ 2024-05-23 08:03:06,805 INFO SenderThread:3570 [sender.py:_save_file():1389] saving file wandb-metadata.json with policy now
36
+ 2024-05-23 08:03:06,978 DEBUG HandlerThread:3570 [handler.py:handle_request():158] handle_request: python_packages
37
+ 2024-05-23 08:03:06,978 DEBUG SenderThread:3570 [sender.py:send_request():405] send_request: python_packages
38
+ 2024-05-23 08:03:06,981 DEBUG SenderThread:3570 [sender.py:send():378] send: telemetry
39
+ 2024-05-23 08:03:06,981 DEBUG HandlerThread:3570 [handler.py:handle_request():158] handle_request: stop_status
40
+ 2024-05-23 08:03:06,981 DEBUG SenderThread:3570 [sender.py:send_request():405] send_request: stop_status
41
+ 2024-05-23 08:03:07,372 INFO wandb-upload_0:3570 [upload_job.py:push():130] Uploaded file /tmp/tmp1reskxomwandb/3o8yafni-wandb-metadata.json
42
+ 2024-05-23 08:03:07,574 INFO Thread-12 :3570 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_080306-xred40sp/files/requirements.txt
43
+ 2024-05-23 08:03:07,575 INFO Thread-12 :3570 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_080306-xred40sp/files/output.log
44
+ 2024-05-23 08:03:07,575 INFO Thread-12 :3570 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_080306-xred40sp/files/wandb-metadata.json
45
+ 2024-05-23 08:03:09,574 INFO Thread-12 :3570 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_080306-xred40sp/files/output.log
46
+ 2024-05-23 08:03:12,092 DEBUG HandlerThread:3570 [handler.py:handle_request():158] handle_request: status_report
47
+ 2024-05-23 08:03:17,145 DEBUG HandlerThread:3570 [handler.py:handle_request():158] handle_request: status_report
48
+ 2024-05-23 08:03:17,579 INFO Thread-12 :3570 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_080306-xred40sp/files/output.log
49
+ 2024-05-23 08:03:18,458 DEBUG SenderThread:3570 [sender.py:send():378] send: exit
50
+ 2024-05-23 08:03:18,458 INFO SenderThread:3570 [sender.py:send_exit():585] handling exit code: 1
51
+ 2024-05-23 08:03:18,459 INFO SenderThread:3570 [sender.py:send_exit():587] handling runtime: 11
52
+ 2024-05-23 08:03:18,460 INFO SenderThread:3570 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end
53
+ 2024-05-23 08:03:18,460 INFO SenderThread:3570 [sender.py:send_exit():593] send defer
54
+ 2024-05-23 08:03:18,460 DEBUG HandlerThread:3570 [handler.py:handle_request():158] handle_request: defer
55
+ 2024-05-23 08:03:18,460 INFO HandlerThread:3570 [handler.py:handle_request_defer():184] handle defer: 0
56
+ 2024-05-23 08:03:18,460 DEBUG SenderThread:3570 [sender.py:send_request():405] send_request: defer
57
+ 2024-05-23 08:03:18,460 INFO SenderThread:3570 [sender.py:send_request_defer():609] handle sender defer: 0
58
+ 2024-05-23 08:03:18,460 INFO SenderThread:3570 [sender.py:transition_state():613] send defer: 1
59
+ 2024-05-23 08:03:18,460 DEBUG HandlerThread:3570 [handler.py:handle_request():158] handle_request: defer
60
+ 2024-05-23 08:03:18,461 INFO HandlerThread:3570 [handler.py:handle_request_defer():184] handle defer: 1
61
+ 2024-05-23 08:03:18,461 DEBUG SenderThread:3570 [sender.py:send_request():405] send_request: defer
62
+ 2024-05-23 08:03:18,461 INFO SenderThread:3570 [sender.py:send_request_defer():609] handle sender defer: 1
63
+ 2024-05-23 08:03:18,461 INFO SenderThread:3570 [sender.py:transition_state():613] send defer: 2
64
+ 2024-05-23 08:03:18,461 DEBUG HandlerThread:3570 [handler.py:handle_request():158] handle_request: defer
65
+ 2024-05-23 08:03:18,461 INFO HandlerThread:3570 [handler.py:handle_request_defer():184] handle defer: 2
66
+ 2024-05-23 08:03:18,461 INFO HandlerThread:3570 [system_monitor.py:finish():203] Stopping system monitor
67
+ 2024-05-23 08:03:18,461 DEBUG SystemMonitor:3570 [system_monitor.py:_start():172] Starting system metrics aggregation loop
68
+ 2024-05-23 08:03:18,461 DEBUG SystemMonitor:3570 [system_monitor.py:_start():179] Finished system metrics aggregation loop
69
+ 2024-05-23 08:03:18,462 DEBUG SystemMonitor:3570 [system_monitor.py:_start():183] Publishing last batch of metrics
70
+ 2024-05-23 08:03:18,462 INFO HandlerThread:3570 [interfaces.py:finish():200] Joined cpu monitor
71
+ 2024-05-23 08:03:18,464 INFO HandlerThread:3570 [interfaces.py:finish():200] Joined disk monitor
72
+ 2024-05-23 08:03:18,464 INFO HandlerThread:3570 [interfaces.py:finish():200] Joined memory monitor
73
+ 2024-05-23 08:03:18,464 INFO HandlerThread:3570 [interfaces.py:finish():200] Joined network monitor
74
+ 2024-05-23 08:03:18,465 DEBUG SenderThread:3570 [sender.py:send_request():405] send_request: defer
75
+ 2024-05-23 08:03:18,465 INFO SenderThread:3570 [sender.py:send_request_defer():609] handle sender defer: 2
76
+ 2024-05-23 08:03:18,465 INFO SenderThread:3570 [sender.py:transition_state():613] send defer: 3
77
+ 2024-05-23 08:03:18,465 DEBUG SenderThread:3570 [sender.py:send():378] send: stats
78
+ 2024-05-23 08:03:18,466 DEBUG HandlerThread:3570 [handler.py:handle_request():158] handle_request: defer
79
+ 2024-05-23 08:03:18,466 INFO HandlerThread:3570 [handler.py:handle_request_defer():184] handle defer: 3
80
+ 2024-05-23 08:03:18,466 DEBUG SenderThread:3570 [sender.py:send_request():405] send_request: defer
81
+ 2024-05-23 08:03:18,467 INFO SenderThread:3570 [sender.py:send_request_defer():609] handle sender defer: 3
82
+ 2024-05-23 08:03:18,467 INFO SenderThread:3570 [sender.py:transition_state():613] send defer: 4
83
+ 2024-05-23 08:03:18,467 DEBUG HandlerThread:3570 [handler.py:handle_request():158] handle_request: defer
84
+ 2024-05-23 08:03:18,467 INFO HandlerThread:3570 [handler.py:handle_request_defer():184] handle defer: 4
85
+ 2024-05-23 08:03:18,467 DEBUG SenderThread:3570 [sender.py:send_request():405] send_request: defer
86
+ 2024-05-23 08:03:18,467 INFO SenderThread:3570 [sender.py:send_request_defer():609] handle sender defer: 4
87
+ 2024-05-23 08:03:18,467 INFO SenderThread:3570 [sender.py:transition_state():613] send defer: 5
88
+ 2024-05-23 08:03:18,467 DEBUG HandlerThread:3570 [handler.py:handle_request():158] handle_request: defer
89
+ 2024-05-23 08:03:18,467 INFO HandlerThread:3570 [handler.py:handle_request_defer():184] handle defer: 5
90
+ 2024-05-23 08:03:18,467 DEBUG SenderThread:3570 [sender.py:send():378] send: summary
91
+ 2024-05-23 08:03:18,468 INFO SenderThread:3570 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end
92
+ 2024-05-23 08:03:18,468 DEBUG SenderThread:3570 [sender.py:send_request():405] send_request: defer
93
+ 2024-05-23 08:03:18,468 INFO SenderThread:3570 [sender.py:send_request_defer():609] handle sender defer: 5
94
+ 2024-05-23 08:03:18,468 INFO SenderThread:3570 [sender.py:transition_state():613] send defer: 6
95
+ 2024-05-23 08:03:18,468 DEBUG HandlerThread:3570 [handler.py:handle_request():158] handle_request: defer
96
+ 2024-05-23 08:03:18,468 INFO HandlerThread:3570 [handler.py:handle_request_defer():184] handle defer: 6
97
+ 2024-05-23 08:03:18,468 DEBUG SenderThread:3570 [sender.py:send_request():405] send_request: defer
98
+ 2024-05-23 08:03:18,468 INFO SenderThread:3570 [sender.py:send_request_defer():609] handle sender defer: 6
99
+ 2024-05-23 08:03:18,473 DEBUG HandlerThread:3570 [handler.py:handle_request():158] handle_request: status_report
100
+ 2024-05-23 08:03:18,539 INFO SenderThread:3570 [sender.py:transition_state():613] send defer: 7
101
+ 2024-05-23 08:03:18,540 DEBUG HandlerThread:3570 [handler.py:handle_request():158] handle_request: defer
102
+ 2024-05-23 08:03:18,540 INFO HandlerThread:3570 [handler.py:handle_request_defer():184] handle defer: 7
103
+ 2024-05-23 08:03:18,540 DEBUG SenderThread:3570 [sender.py:send_request():405] send_request: defer
104
+ 2024-05-23 08:03:18,540 INFO SenderThread:3570 [sender.py:send_request_defer():609] handle sender defer: 7
105
+ 2024-05-23 08:03:18,581 INFO Thread-12 :3570 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_080306-xred40sp/files/config.yaml
106
+ 2024-05-23 08:03:18,581 INFO Thread-12 :3570 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_080306-xred40sp/files/wandb-summary.json
107
+ 2024-05-23 08:03:19,113 INFO SenderThread:3570 [sender.py:transition_state():613] send defer: 8
108
+ 2024-05-23 08:03:19,113 DEBUG HandlerThread:3570 [handler.py:handle_request():158] handle_request: defer
109
+ 2024-05-23 08:03:19,113 INFO HandlerThread:3570 [handler.py:handle_request_defer():184] handle defer: 8
110
+ 2024-05-23 08:03:19,113 DEBUG SenderThread:3570 [sender.py:send_request():405] send_request: defer
111
+ 2024-05-23 08:03:19,114 INFO SenderThread:3570 [sender.py:send_request_defer():609] handle sender defer: 8
112
+ 2024-05-23 08:03:19,114 INFO SenderThread:3570 [job_builder.py:build():432] Attempting to build job artifact
113
+ 2024-05-23 08:03:19,114 INFO SenderThread:3570 [job_builder.py:_get_source_type():576] no source found
114
+ 2024-05-23 08:03:19,114 INFO SenderThread:3570 [sender.py:transition_state():613] send defer: 9
115
+ 2024-05-23 08:03:19,114 DEBUG HandlerThread:3570 [handler.py:handle_request():158] handle_request: defer
116
+ 2024-05-23 08:03:19,114 INFO HandlerThread:3570 [handler.py:handle_request_defer():184] handle defer: 9
117
+ 2024-05-23 08:03:19,114 DEBUG SenderThread:3570 [sender.py:send_request():405] send_request: defer
118
+ 2024-05-23 08:03:19,114 INFO SenderThread:3570 [sender.py:send_request_defer():609] handle sender defer: 9
119
+ 2024-05-23 08:03:19,114 INFO SenderThread:3570 [dir_watcher.py:finish():358] shutting down directory watcher
120
+ 2024-05-23 08:03:19,458 DEBUG HandlerThread:3570 [handler.py:handle_request():158] handle_request: poll_exit
121
+ 2024-05-23 08:03:19,582 INFO SenderThread:3570 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_080306-xred40sp/files/output.log
122
+ 2024-05-23 08:03:19,582 INFO SenderThread:3570 [dir_watcher.py:finish():388] scan: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_080306-xred40sp/files
123
+ 2024-05-23 08:03:19,582 INFO SenderThread:3570 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_080306-xred40sp/files/output.log output.log
124
+ 2024-05-23 08:03:19,582 INFO SenderThread:3570 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_080306-xred40sp/files/wandb-metadata.json wandb-metadata.json
125
+ 2024-05-23 08:03:19,585 INFO SenderThread:3570 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_080306-xred40sp/files/wandb-summary.json wandb-summary.json
126
+ 2024-05-23 08:03:19,585 INFO SenderThread:3570 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_080306-xred40sp/files/requirements.txt requirements.txt
127
+ 2024-05-23 08:03:19,585 INFO SenderThread:3570 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_080306-xred40sp/files/config.yaml config.yaml
128
+ 2024-05-23 08:03:19,585 INFO SenderThread:3570 [sender.py:transition_state():613] send defer: 10
129
+ 2024-05-23 08:03:19,585 DEBUG SenderThread:3570 [sender.py:send_request():405] send_request: poll_exit
130
+ 2024-05-23 08:03:19,585 DEBUG HandlerThread:3570 [handler.py:handle_request():158] handle_request: defer
131
+ 2024-05-23 08:03:19,586 INFO HandlerThread:3570 [handler.py:handle_request_defer():184] handle defer: 10
132
+ 2024-05-23 08:03:19,586 DEBUG SenderThread:3570 [sender.py:send_request():405] send_request: defer
133
+ 2024-05-23 08:03:19,586 INFO SenderThread:3570 [sender.py:send_request_defer():609] handle sender defer: 10
134
+ 2024-05-23 08:03:19,586 INFO SenderThread:3570 [file_pusher.py:finish():169] shutting down file pusher
135
+ 2024-05-23 08:03:19,849 INFO wandb-upload_0:3570 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_080306-xred40sp/files/output.log
136
+ 2024-05-23 08:03:20,230 INFO wandb-upload_3:3570 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_080306-xred40sp/files/config.yaml
137
+ 2024-05-23 08:03:20,459 DEBUG HandlerThread:3570 [handler.py:handle_request():158] handle_request: poll_exit
138
+ 2024-05-23 08:03:20,459 DEBUG SenderThread:3570 [sender.py:send_request():405] send_request: poll_exit
139
+ 2024-05-23 08:03:20,561 INFO wandb-upload_2:3570 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_080306-xred40sp/files/requirements.txt
140
+ 2024-05-23 08:03:21,232 INFO wandb-upload_1:3570 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_080306-xred40sp/files/wandb-summary.json
141
+ 2024-05-23 08:03:21,433 INFO Thread-11 (_thread_body):3570 [sender.py:transition_state():613] send defer: 11
142
+ 2024-05-23 08:03:21,433 DEBUG HandlerThread:3570 [handler.py:handle_request():158] handle_request: defer
143
+ 2024-05-23 08:03:21,433 INFO HandlerThread:3570 [handler.py:handle_request_defer():184] handle defer: 11
144
+ 2024-05-23 08:03:21,433 DEBUG SenderThread:3570 [sender.py:send_request():405] send_request: defer
145
+ 2024-05-23 08:03:21,433 INFO SenderThread:3570 [sender.py:send_request_defer():609] handle sender defer: 11
146
+ 2024-05-23 08:03:21,433 INFO SenderThread:3570 [file_pusher.py:join():175] waiting for file pusher
147
+ 2024-05-23 08:03:21,433 INFO SenderThread:3570 [sender.py:transition_state():613] send defer: 12
148
+ 2024-05-23 08:03:21,433 DEBUG HandlerThread:3570 [handler.py:handle_request():158] handle_request: defer
149
+ 2024-05-23 08:03:21,433 INFO HandlerThread:3570 [handler.py:handle_request_defer():184] handle defer: 12
150
+ 2024-05-23 08:03:21,434 DEBUG SenderThread:3570 [sender.py:send_request():405] send_request: defer
151
+ 2024-05-23 08:03:21,434 INFO SenderThread:3570 [sender.py:send_request_defer():609] handle sender defer: 12
152
+ 2024-05-23 08:03:21,434 INFO SenderThread:3570 [file_stream.py:finish():601] file stream finish called
153
+ 2024-05-23 08:03:21,459 DEBUG HandlerThread:3570 [handler.py:handle_request():158] handle_request: poll_exit
154
+ 2024-05-23 08:03:21,492 INFO SenderThread:3570 [file_stream.py:finish():605] file stream finish is done
155
+ 2024-05-23 08:03:21,492 INFO SenderThread:3570 [sender.py:transition_state():613] send defer: 13
156
+ 2024-05-23 08:03:21,492 DEBUG SenderThread:3570 [sender.py:send_request():405] send_request: poll_exit
157
+ 2024-05-23 08:03:21,492 DEBUG HandlerThread:3570 [handler.py:handle_request():158] handle_request: defer
158
+ 2024-05-23 08:03:21,492 INFO HandlerThread:3570 [handler.py:handle_request_defer():184] handle defer: 13
159
+ 2024-05-23 08:03:21,492 DEBUG SenderThread:3570 [sender.py:send_request():405] send_request: defer
160
+ 2024-05-23 08:03:21,492 INFO SenderThread:3570 [sender.py:send_request_defer():609] handle sender defer: 13
161
+ 2024-05-23 08:03:21,492 INFO SenderThread:3570 [sender.py:transition_state():613] send defer: 14
162
+ 2024-05-23 08:03:21,493 DEBUG HandlerThread:3570 [handler.py:handle_request():158] handle_request: defer
163
+ 2024-05-23 08:03:21,493 INFO HandlerThread:3570 [handler.py:handle_request_defer():184] handle defer: 14
164
+ 2024-05-23 08:03:21,493 DEBUG SenderThread:3570 [sender.py:send():378] send: final
165
+ 2024-05-23 08:03:21,493 DEBUG SenderThread:3570 [sender.py:send():378] send: footer
166
+ 2024-05-23 08:03:21,493 DEBUG SenderThread:3570 [sender.py:send_request():405] send_request: defer
167
+ 2024-05-23 08:03:21,493 INFO SenderThread:3570 [sender.py:send_request_defer():609] handle sender defer: 14
168
+ 2024-05-23 08:03:21,494 DEBUG HandlerThread:3570 [handler.py:handle_request():158] handle_request: poll_exit
169
+ 2024-05-23 08:03:21,494 DEBUG HandlerThread:3570 [handler.py:handle_request():158] handle_request: poll_exit
170
+ 2024-05-23 08:03:21,494 DEBUG HandlerThread:3570 [handler.py:handle_request():158] handle_request: server_info
171
+ 2024-05-23 08:03:21,494 DEBUG HandlerThread:3570 [handler.py:handle_request():158] handle_request: get_summary
172
+ 2024-05-23 08:03:21,494 DEBUG HandlerThread:3570 [handler.py:handle_request():158] handle_request: sampled_history
173
+ 2024-05-23 08:03:21,494 DEBUG HandlerThread:3570 [handler.py:handle_request():158] handle_request: internal_messages
174
+ 2024-05-23 08:03:21,494 DEBUG SenderThread:3570 [sender.py:send_request():405] send_request: poll_exit
175
+ 2024-05-23 08:03:21,494 DEBUG SenderThread:3570 [sender.py:send_request():405] send_request: poll_exit
176
+ 2024-05-23 08:03:21,494 DEBUG SenderThread:3570 [sender.py:send_request():405] send_request: server_info
177
+ 2024-05-23 08:03:21,559 INFO MainThread:3570 [wandb_run.py:_footer_history_summary_info():3994] rendering history
178
+ 2024-05-23 08:03:21,559 INFO MainThread:3570 [wandb_run.py:_footer_history_summary_info():4026] rendering summary
179
+ 2024-05-23 08:03:21,559 INFO MainThread:3570 [wandb_run.py:_footer_sync_info():3953] logging synced files
180
+ 2024-05-23 08:03:21,560 DEBUG HandlerThread:3570 [handler.py:handle_request():158] handle_request: shutdown
181
+ 2024-05-23 08:03:21,560 INFO HandlerThread:3570 [handler.py:finish():882] shutting down handler
182
+ 2024-05-23 08:03:22,494 INFO WriterThread:3570 [datastore.py:close():296] close: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_080306-xred40sp/run-xred40sp.wandb
183
+ 2024-05-23 08:03:22,559 INFO SenderThread:3570 [sender.py:finish():1545] shutting down sender
184
+ 2024-05-23 08:03:22,559 INFO SenderThread:3570 [file_pusher.py:finish():169] shutting down file pusher
185
+ 2024-05-23 08:03:22,559 INFO SenderThread:3570 [file_pusher.py:join():175] waiting for file pusher
lm-evaluation-harness/wandb/run-20240523_080306-xred40sp/logs/debug.log ADDED
@@ -0,0 +1,29 @@
1
+ 2024-05-23 08:03:06,300 INFO MainThread:3415 [wandb_setup.py:_flush():76] Current SDK version is 0.17.0
2
+ 2024-05-23 08:03:06,300 INFO MainThread:3415 [wandb_setup.py:_flush():76] Configure stats pid to 3415
3
+ 2024-05-23 08:03:06,300 INFO MainThread:3415 [wandb_setup.py:_flush():76] Loading settings from /root/.config/wandb/settings
4
+ 2024-05-23 08:03:06,300 INFO MainThread:3415 [wandb_setup.py:_flush():76] Loading settings from /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/settings
5
+ 2024-05-23 08:03:06,300 INFO MainThread:3415 [wandb_setup.py:_flush():76] Loading settings from environment variables: {}
6
+ 2024-05-23 08:03:06,300 INFO MainThread:3415 [wandb_setup.py:_flush():76] Applying setup settings: {'_disable_service': False}
7
+ 2024-05-23 08:03:06,300 WARNING MainThread:3415 [wandb_setup.py:_flush():76] Could not find program at -m lm_eval.__main__
8
+ 2024-05-23 08:03:06,300 INFO MainThread:3415 [wandb_setup.py:_flush():76] Inferring run settings from compute environment: {'program_relpath': None, 'program': '-m lm_eval.__main__'}
9
+ 2024-05-23 08:03:06,300 INFO MainThread:3415 [wandb_setup.py:_flush():76] Applying login settings: {}
10
+ 2024-05-23 08:03:06,301 INFO MainThread:3415 [wandb_init.py:_log_setup():520] Logging user logs to /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_080306-xred40sp/logs/debug.log
11
+ 2024-05-23 08:03:06,301 INFO MainThread:3415 [wandb_init.py:_log_setup():521] Logging internal logs to /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_080306-xred40sp/logs/debug-internal.log
12
+ 2024-05-23 08:03:06,301 INFO MainThread:3415 [wandb_init.py:init():560] calling init triggers
13
+ 2024-05-23 08:03:06,301 INFO MainThread:3415 [wandb_init.py:init():567] wandb.init called with sweep_config: {}
14
+ config: {}
15
+ 2024-05-23 08:03:06,301 INFO MainThread:3415 [wandb_init.py:init():610] starting backend
16
+ 2024-05-23 08:03:06,301 INFO MainThread:3415 [wandb_init.py:init():614] setting up manager
17
+ 2024-05-23 08:03:06,305 INFO MainThread:3415 [backend.py:_multiprocessing_setup():105] multiprocessing start_methods=fork,spawn,forkserver, using: spawn
18
+ 2024-05-23 08:03:06,306 INFO MainThread:3415 [wandb_init.py:init():622] backend started and connected
19
+ 2024-05-23 08:03:06,309 INFO MainThread:3415 [wandb_init.py:init():711] updated telemetry
20
+ 2024-05-23 08:03:06,317 INFO MainThread:3415 [wandb_init.py:init():744] communicating run to backend with 90.0 second timeout
21
+ 2024-05-23 08:03:06,575 INFO MainThread:3415 [wandb_run.py:_on_init():2396] communicating current version
22
+ 2024-05-23 08:03:06,684 INFO MainThread:3415 [wandb_run.py:_on_init():2405] got version response
23
+ 2024-05-23 08:03:06,684 INFO MainThread:3415 [wandb_init.py:init():795] starting run threads in backend
24
+ 2024-05-23 08:03:06,979 INFO MainThread:3415 [wandb_run.py:_console_start():2374] atexit reg
25
+ 2024-05-23 08:03:06,979 INFO MainThread:3415 [wandb_run.py:_redirect():2229] redirect: wrap_raw
26
+ 2024-05-23 08:03:06,980 INFO MainThread:3415 [wandb_run.py:_redirect():2294] Wrapping output streams.
27
+ 2024-05-23 08:03:06,980 INFO MainThread:3415 [wandb_run.py:_redirect():2319] Redirects installed.
28
+ 2024-05-23 08:03:06,982 INFO MainThread:3415 [wandb_init.py:init():838] run started, returning control to user process
29
+ 2024-05-23 08:03:22,561 WARNING MsgRouterThr:3415 [router.py:message_loop():77] message_loop has been closed
lm-evaluation-harness/wandb/run-20240523_080306-xred40sp/run-xred40sp.wandb ADDED
Binary file (11.2 kB)
lm-evaluation-harness/wandb/run-20240523_123859-6ht0x2b2/files/config.yaml ADDED
@@ -0,0 +1,43 @@
1
+ wandb_version: 1
2
+
3
+ _wandb:
4
+ desc: null
5
+ value:
6
+ python_version: 3.10.12
7
+ cli_version: 0.17.0
8
+ framework: huggingface
9
+ huggingface_version: 4.41.1
10
+ is_jupyter_run: false
11
+ is_kaggle_kernel: false
12
+ start_time: 1716467939
13
+ t:
14
+ 1:
15
+ - 1
16
+ - 5
17
+ - 11
18
+ - 49
19
+ - 51
20
+ - 53
21
+ - 55
22
+ - 71
23
+ - 98
24
+ - 100
25
+ 2:
26
+ - 1
27
+ - 5
28
+ - 11
29
+ - 49
30
+ - 51
31
+ - 53
32
+ - 55
33
+ - 71
34
+ - 98
35
+ - 100
36
+ 3:
37
+ - 23
38
+ 4: 3.10.12
39
+ 5: 0.17.0
40
+ 6: 4.41.1
41
+ 8:
42
+ - 5
43
+ 13: linux-x86_64
lm-evaluation-harness/wandb/run-20240523_123859-6ht0x2b2/files/output.log ADDED
@@ -0,0 +1,34 @@
1
+
2
+ 2024-05-23:12:39:00,583 INFO [__main__.py:251] Verbosity set to INFO
3
+ 2024-05-23:12:39:09,234 INFO [__main__.py:335] Selected Tasks: ['arc_easy', 'hellaswag', 'mrpc', 'openbookqa', 'sst2', 'winogrande']
4
+ 2024-05-23:12:39:09,235 INFO [evaluator.py:131] Setting random seed to 0 | Setting numpy seed to 1234 | Setting torch manual seed to 1234
5
+ 2024-05-23:12:39:09,235 INFO [evaluator.py:177] Initializing hf model, with arguments: {'pretrained': '/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step6000'}
6
+ 2024-05-23:12:39:11,778 INFO [huggingface.py:164] Using device 'cuda'
7
+ Traceback (most recent call last):
8
+ File "/usr/lib/python3.10/runpy.py", line 196, in _run_module_as_main
9
+ return _run_code(code, main_globals, None,
10
+ File "/usr/lib/python3.10/runpy.py", line 86, in _run_code
11
+ exec(code, run_globals)
12
+ File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 417, in <module>
13
+ cli_evaluate()
14
+ File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 341, in cli_evaluate
15
+ results = evaluator.simple_evaluate(
16
+ File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/utils.py", line 288, in _wrapper
17
+ return fn(*args, **kwargs)
18
+ File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/evaluator.py", line 180, in simple_evaluate
19
+ lm = lm_eval.api.registry.get_model(model).create_from_arg_string(
20
+ File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/api/model.py", line 134, in create_from_arg_string
21
+ return cls(**args, **args2)
22
+ File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 190, in __init__
23
+ self._get_config(
24
+ File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 471, in _get_config
25
+ self._config = transformers.AutoConfig.from_pretrained(
26
+ File "/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py", line 934, in from_pretrained
27
+ config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs)
28
+ File "/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 632, in get_config_dict
29
+ config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs)
30
+ File "/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 689, in _get_config_dict
31
+ resolved_config_file = cached_file(
32
+ File "/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py", line 370, in cached_file
33
+ raise EnvironmentError(
34
+ OSError: /mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step6000 does not appear to have a file named config.json. Checkout 'https://huggingface.co//mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step6000/tree/main' for available files.
lm-evaluation-harness/wandb/run-20240523_123859-6ht0x2b2/files/requirements.txt ADDED
@@ -0,0 +1,155 @@
1
+ DataProperty==1.0.1
2
+ GitPython==3.1.43
3
+ Jinja2==3.1.4
4
+ Markdown==3.6
5
+ MarkupSafe==2.1.5
6
+ Pillow-SIMD==7.0.0.post3
7
+ PyYAML==6.0
8
+ Werkzeug==3.0.3
9
+ absl-py==2.1.0
10
+ accelerate==0.30.1
11
+ aiohttp==3.9.5
12
+ aiosignal==1.3.1
13
+ async-timeout==4.0.3
14
+ attrs==23.2.0
15
+ av==9.2.0
16
+ cachetools==5.3.3
17
+ certifi==2024.2.2
18
+ cffi==1.15.1
19
+ cfgv==3.4.0
20
+ chardet==5.2.0
21
+ charset-normalizer==3.3.2
22
+ click==8.1.7
23
+ cmake==3.29.2
24
+ colorama==0.4.6
25
+ datasets==2.19.1
26
+ deepspeed==0.12.4+hpu.synapse.v1.15.1
27
+ dill==0.3.8
28
+ distlib==0.3.8
29
+ docker-pycreds==0.4.0
30
+ einops==0.8.0
31
+ evaluate==0.4.2
32
+ exceptiongroup==1.2.1
33
+ expecttest==0.2.1
34
+ filelock==3.14.0
35
+ frozenlist==1.4.1
36
+ fsspec==2024.3.1
37
+ gitdb==4.0.11
38
+ google-auth-oauthlib==0.4.6
39
+ google-auth==2.29.0
40
+ grpcio==1.63.0
41
+ habana-media-loader==1.15.1.15
42
+ habana-pyhlml==1.15.1.15
43
+ habana-torch-dataloader==1.15.1.15
44
+ habana-torch-plugin==1.15.1.15
45
+ habana_gpu_migration==1.15.1.15
46
+ habana_quantization_toolkit==1.15.1.15
47
+ hjson==3.1.0
48
+ huggingface-hub==0.23.1
49
+ identify==2.5.36
50
+ idna==3.7
51
+ iniconfig==2.0.0
52
+ joblib==1.4.2
53
+ jsonlines==4.0.0
54
+ lightning-habana==1.4.0
55
+ lightning-utilities==0.11.2
56
+ lightning==2.2.0.post0
57
+ lm_eval==0.4.2
58
+ lm_eval==0.4.2
59
+ lm_eval==0.4.2
60
+ lxml==5.2.2
61
+ mbstrdecoder==1.1.3
62
+ more-itertools==10.2.0
63
+ mpi4py==3.1.4
64
+ mpmath==1.3.0
65
+ multidict==6.0.5
66
+ multiprocess==0.70.16
67
+ networkx==3.3
68
+ ninja==1.11.1.1
69
+ nltk==3.8.1
70
+ nodeenv==1.8.0
71
+ numexpr==2.10.0
72
+ numpy==1.23.5
73
+ oauthlib==3.2.2
74
+ packaging==24.0
75
+ pandas==2.0.1
76
+ pathspec==0.12.1
77
+ pathvalidate==3.2.0
78
+ peft==0.11.1
79
+ perfetto==0.7.0
80
+ pillow==10.3.0
81
+ pip==22.0.2
82
+ pip==23.3.1
83
+ platformdirs==4.2.1
84
+ pluggy==1.5.0
85
+ portalocker==2.8.2
86
+ pre-commit==3.3.3
87
+ pretty-errors==1.2.25
88
+ protobuf==3.20.3
89
+ psutil==5.9.8
90
+ py-cpuinfo==9.0.0
91
+ pyarrow-hotfix==0.6
92
+ pyarrow==16.1.0
93
+ pyasn1==0.6.0
94
+ pyasn1_modules==0.4.0
95
+ pybind11==2.10.4
96
+ pycparser==2.22
97
+ pydantic==1.10.13
98
+ pynvml==8.0.4
99
+ pytablewriter==1.2.0
100
+ pytest==8.2.0
101
+ python-dateutil==2.9.0.post0
102
+ pytorch-lightning==2.2.4
103
+ pytz==2024.1
104
+ regex==2023.5.5
105
+ requests-oauthlib==2.0.0
106
+ requests==2.31.0
107
+ rouge_score==0.1.2
108
+ rsa==4.9
109
+ sacrebleu==2.4.2
110
+ safetensors==0.4.3
111
+ scikit-learn==1.5.0
112
+ scipy==1.13.1
113
+ sentencepiece==0.2.0
114
+ sentry-sdk==2.3.0
115
+ setproctitle==1.3.3
116
+ setuptools==59.6.0
117
+ setuptools==69.5.1
118
+ six==1.16.0
119
+ smmap==5.0.1
120
+ sqlitedict==2.1.0
121
+ symengine==0.11.0
122
+ sympy==1.12
123
+ tabledata==1.3.3
124
+ tabulate==0.9.0
125
+ tcolorpy==0.1.6
126
+ tdqm==0.0.1
127
+ tensorboard-data-server==0.6.1
128
+ tensorboard-plugin-wit==1.8.1
129
+ tensorboard==2.11.2
130
+ threadpoolctl==3.5.0
131
+ tokenizers==0.19.1
132
+ tomli==2.0.1
133
+ torch==2.2.0a0+git8964477
134
+ torch_tb_profiler==0.4.0
135
+ torchaudio==2.2.0+08901ad
136
+ torchdata==0.7.1+5e6f7b7
137
+ torchmetrics==1.4.0
138
+ torchtext==0.17.0+400da5c
139
+ torchvision==0.17.0+b2383d4
140
+ tqdm-multiprocess==0.0.11
141
+ tqdm==4.66.4
142
+ transformers==4.41.1
143
+ typepy==1.3.2
144
+ typing_extensions==4.11.0
145
+ tzdata==2024.1
146
+ urllib3==1.26.18
147
+ virtualenv==20.26.1
148
+ wandb==0.17.0
149
+ wheel==0.37.1
150
+ wheel==0.43.0
151
+ word2number==1.1
152
+ xxhash==3.4.1
153
+ yamllint==1.35.1
154
+ yarl==1.9.4
155
+ zstandard==0.22.0
lm-evaluation-harness/wandb/run-20240523_123859-6ht0x2b2/files/wandb-metadata.json ADDED
@@ -0,0 +1,850 @@
1
+ {
2
+ "os": "Linux-5.15.0-92-generic-x86_64-with-glibc2.35",
3
+ "python": "3.10.12",
4
+ "heartbeatAt": "2024-05-23T12:39:00.381720",
5
+ "startedAt": "2024-05-23T12:38:59.852035",
6
+ "docker": null,
7
+ "cuda": null,
8
+ "args": [
9
+ "--model",
10
+ "hf",
11
+ "--model_args",
12
+ "pretrained=/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step6000",
13
+ "--tasks",
14
+ "hellaswag,arc_easy,openbookqa,winogrande,sst2,mrpc",
15
+ "--batch_size",
16
+ "auto",
17
+ "--wandb_args",
18
+ "project=bharatgpt,group=trial_expt_2"
19
+ ],
20
+ "state": "running",
21
+ "program": "-m lm_eval.__main__",
22
+ "codePathLocal": null,
23
+ "git": {
24
+ "remote": "https://github.com/EleutherAI/lm-evaluation-harness",
25
+ "commit": null
26
+ },
27
+ "email": null,
28
+ "root": "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness",
29
+ "host": "peacock-evaluation-worker-0",
30
+ "username": "root",
31
+ "executable": "/usr/bin/python3",
32
+ "cpu_count": 80,
33
+ "cpu_count_logical": 160,
34
+ "cpu_freq": {
35
+ "current": 2325.8338875,
36
+ "min": 800.0,
37
+ "max": 3400.0
38
+ },
39
+ "cpu_freq_per_core": [
40
+ {
41
+ "current": 3400.002,
42
+ "min": 800.0,
43
+ "max": 3400.0
44
+ },
45
+ {
46
+ "current": 3400.0,
47
+ "min": 800.0,
48
+ "max": 3400.0
49
+ },
50
+ {
51
+ "current": 2300.0,
52
+ "min": 800.0,
53
+ "max": 3400.0
54
+ },
55
+ {
56
+ "current": 2300.0,
57
+ "min": 800.0,
58
+ "max": 3400.0
59
+ },
60
+ {
61
+ "current": 2300.0,
62
+ "min": 800.0,
63
+ "max": 3400.0
64
+ },
65
+ {
66
+ "current": 2300.0,
67
+ "min": 800.0,
68
+ "max": 3400.0
69
+ },
70
+ {
71
+ "current": 2300.0,
72
+ "min": 800.0,
73
+ "max": 3400.0
74
+ },
75
+ {
76
+ "current": 2300.0,
77
+ "min": 800.0,
78
+ "max": 3400.0
79
+ },
80
+ {
81
+ "current": 2300.0,
82
+ "min": 800.0,
83
+ "max": 3400.0
84
+ },
85
+ {
86
+ "current": 2300.0,
87
+ "min": 800.0,
88
+ "max": 3400.0
89
+ },
90
+ {
91
+ "current": 2300.0,
92
+ "min": 800.0,
93
+ "max": 3400.0
94
+ },
95
+ {
96
+ "current": 2300.0,
97
+ "min": 800.0,
98
+ "max": 3400.0
99
+ },
100
+ {
101
+ "current": 2300.0,
102
+ "min": 800.0,
103
+ "max": 3400.0
104
+ },
105
+ {
106
+ "current": 2300.0,
107
+ "min": 800.0,
108
+ "max": 3400.0
109
+ },
110
+ {
111
+ "current": 2300.0,
112
+ "min": 800.0,
113
+ "max": 3400.0
114
+ },
115
+ {
116
+ "current": 2300.0,
117
+ "min": 800.0,
118
+ "max": 3400.0
119
+ },
120
+ {
121
+ "current": 2300.0,
122
+ "min": 800.0,
123
+ "max": 3400.0
124
+ },
125
+ {
126
+ "current": 2300.0,
127
+ "min": 800.0,
128
+ "max": 3400.0
129
+ },
130
+ {
131
+ "current": 2300.0,
132
+ "min": 800.0,
133
+ "max": 3400.0
134
+ },
135
+ {
136
+ "current": 2300.0,
137
+ "min": 800.0,
138
+ "max": 3400.0
139
+ },
140
+ {
141
+ "current": 2300.0,
142
+ "min": 800.0,
143
+ "max": 3400.0
144
+ },
145
+ {
146
+ "current": 2300.0,
147
+ "min": 800.0,
148
+ "max": 3400.0
149
+ },
150
+ {
151
+ "current": 2300.0,
152
+ "min": 800.0,
153
+ "max": 3400.0
154
+ },
155
+ {
156
+ "current": 2300.0,
157
+ "min": 800.0,
158
+ "max": 3400.0
159
+ },
160
+ {
161
+ "current": 2300.0,
162
+ "min": 800.0,
163
+ "max": 3400.0
164
+ },
165
+ {
166
+ "current": 2300.0,
167
+ "min": 800.0,
168
+ "max": 3400.0
169
+ },
170
+ {
171
+ "current": 2300.0,
172
+ "min": 800.0,
173
+ "max": 3400.0
174
+ },
175
+ {
176
+ "current": 2300.0,
177
+ "min": 800.0,
178
+ "max": 3400.0
179
+ },
180
+ {
181
+ "current": 2300.0,
182
+ "min": 800.0,
183
+ "max": 3400.0
184
+ },
185
+ {
186
+ "current": 2300.0,
187
+ "min": 800.0,
188
+ "max": 3400.0
189
+ },
190
+ {
191
+ "current": 2300.0,
192
+ "min": 800.0,
193
+ "max": 3400.0
194
+ },
195
+ {
196
+ "current": 2300.0,
197
+ "min": 800.0,
198
+ "max": 3400.0
199
+ },
200
+ {
201
+ "current": 2300.0,
202
+ "min": 800.0,
203
+ "max": 3400.0
204
+ },
205
+ {
206
+ "current": 2300.0,
207
+ "min": 800.0,
208
+ "max": 3400.0
209
+ },
210
+ {
211
+ "current": 2300.0,
212
+ "min": 800.0,
213
+ "max": 3400.0
214
+ },
215
+ {
216
+ "current": 2300.0,
217
+ "min": 800.0,
218
+ "max": 3400.0
219
+ },
220
+ {
221
+ "current": 2300.0,
222
+ "min": 800.0,
223
+ "max": 3400.0
224
+ },
225
+ {
226
+ "current": 2300.0,
227
+ "min": 800.0,
228
+ "max": 3400.0
229
+ },
230
+ {
231
+ "current": 2300.0,
232
+ "min": 800.0,
233
+ "max": 3400.0
234
+ },
235
+ {
236
+ "current": 2300.0,
237
+ "min": 800.0,
238
+ "max": 3400.0
239
+ },
240
+ {
241
+ "current": 3400.0,
242
+ "min": 800.0,
243
+ "max": 3400.0
244
+ },
245
+ {
246
+ "current": 2300.0,
247
+ "min": 800.0,
248
+ "max": 3400.0
249
+ },
250
+ {
251
+ "current": 2300.0,
252
+ "min": 800.0,
253
+ "max": 3400.0
254
+ },
255
+ {
256
+ "current": 2300.0,
257
+ "min": 800.0,
258
+ "max": 3400.0
259
+ },
260
+ {
261
+ "current": 2300.0,
262
+ "min": 800.0,
263
+ "max": 3400.0
264
+ },
265
+ {
266
+ "current": 2300.0,
267
+ "min": 800.0,
268
+ "max": 3400.0
269
+ },
270
+ {
271
+ "current": 2300.0,
272
+ "min": 800.0,
273
+ "max": 3400.0
274
+ },
275
+ {
276
+ "current": 2300.0,
277
+ "min": 800.0,
278
+ "max": 3400.0
279
+ },
280
+ {
281
+ "current": 2300.0,
282
+ "min": 800.0,
283
+ "max": 3400.0
284
+ },
285
+ {
286
+ "current": 2300.0,
287
+ "min": 800.0,
288
+ "max": 3400.0
289
+ },
290
+ {
291
+ "current": 2300.0,
292
+ "min": 800.0,
293
+ "max": 3400.0
294
+ },
295
+ {
296
+ "current": 2300.0,
297
+ "min": 800.0,
298
+ "max": 3400.0
299
+ },
300
+ {
301
+ "current": 2300.0,
302
+ "min": 800.0,
303
+ "max": 3400.0
304
+ },
305
+ {
306
+ "current": 2300.0,
307
+ "min": 800.0,
308
+ "max": 3400.0
309
+ },
310
+ {
311
+ "current": 2300.0,
312
+ "min": 800.0,
313
+ "max": 3400.0
314
+ },
315
+ {
316
+ "current": 2300.0,
317
+ "min": 800.0,
318
+ "max": 3400.0
319
+ },
320
+ {
321
+ "current": 2300.0,
322
+ "min": 800.0,
323
+ "max": 3400.0
324
+ },
325
+ {
326
+ "current": 2300.0,
327
+ "min": 800.0,
328
+ "max": 3400.0
329
+ },
330
+ {
331
+ "current": 2300.0,
332
+ "min": 800.0,
333
+ "max": 3400.0
334
+ },
335
+ {
336
+ "current": 2300.0,
337
+ "min": 800.0,
338
+ "max": 3400.0
339
+ },
340
+ {
341
+ "current": 2300.0,
342
+ "min": 800.0,
343
+ "max": 3400.0
344
+ },
345
+ {
346
+ "current": 2300.0,
347
+ "min": 800.0,
348
+ "max": 3400.0
349
+ },
350
+ {
351
+ "current": 2300.0,
352
+ "min": 800.0,
353
+ "max": 3400.0
354
+ },
355
+ {
356
+ "current": 2300.0,
357
+ "min": 800.0,
358
+ "max": 3400.0
359
+ },
360
+ {
361
+ "current": 2300.0,
362
+ "min": 800.0,
363
+ "max": 3400.0
364
+ },
365
+ {
366
+ "current": 2300.0,
367
+ "min": 800.0,
368
+ "max": 3400.0
369
+ },
370
+ {
371
+ "current": 2300.0,
372
+ "min": 800.0,
373
+ "max": 3400.0
374
+ },
375
+ {
376
+ "current": 2300.0,
377
+ "min": 800.0,
378
+ "max": 3400.0
379
+ },
380
+ {
381
+ "current": 2300.0,
382
+ "min": 800.0,
383
+ "max": 3400.0
384
+ },
385
+ {
386
+ "current": 2300.0,
387
+ "min": 800.0,
388
+ "max": 3400.0
389
+ },
390
+ {
391
+ "current": 2300.0,
392
+ "min": 800.0,
393
+ "max": 3400.0
394
+ },
395
+ {
396
+ "current": 2300.0,
397
+ "min": 800.0,
398
+ "max": 3400.0
399
+ },
400
+ {
401
+ "current": 2300.0,
402
+ "min": 800.0,
403
+ "max": 3400.0
404
+ },
405
+ {
406
+ "current": 2300.0,
407
+ "min": 800.0,
408
+ "max": 3400.0
409
+ },
410
+ {
411
+ "current": 2300.0,
412
+ "min": 800.0,
413
+ "max": 3400.0
414
+ },
415
+ {
416
+ "current": 2300.0,
417
+ "min": 800.0,
418
+ "max": 3400.0
419
+ },
420
+ {
421
+ "current": 2300.0,
422
+ "min": 800.0,
423
+ "max": 3400.0
424
+ },
425
+ {
426
+ "current": 2300.0,
427
+ "min": 800.0,
428
+ "max": 3400.0
429
+ },
430
+ {
431
+ "current": 2300.0,
432
+ "min": 800.0,
433
+ "max": 3400.0
434
+ },
435
+ {
436
+ "current": 2300.0,
437
+ "min": 800.0,
438
+ "max": 3400.0
439
+ },
440
+ {
441
+ "current": 3400.0,
442
+ "min": 800.0,
443
+ "max": 3400.0
444
+ },
445
+ {
446
+ "current": 2300.0,
447
+ "min": 800.0,
448
+ "max": 3400.0
449
+ },
450
+ {
451
+ "current": 2300.0,
452
+ "min": 800.0,
453
+ "max": 3400.0
454
+ },
455
+ {
456
+ "current": 2300.0,
457
+ "min": 800.0,
458
+ "max": 3400.0
459
+ },
460
+ {
461
+ "current": 2300.0,
462
+ "min": 800.0,
463
+ "max": 3400.0
464
+ },
465
+ {
466
+ "current": 2300.0,
467
+ "min": 800.0,
468
+ "max": 3400.0
469
+ },
470
+ {
471
+ "current": 2300.0,
472
+ "min": 800.0,
473
+ "max": 3400.0
474
+ },
475
+ {
476
+ "current": 2300.0,
477
+ "min": 800.0,
478
+ "max": 3400.0
479
+ },
480
+ {
481
+ "current": 2300.0,
482
+ "min": 800.0,
483
+ "max": 3400.0
484
+ },
485
+ {
486
+ "current": 2300.0,
487
+ "min": 800.0,
488
+ "max": 3400.0
489
+ },
490
+ {
491
+ "current": 2300.0,
492
+ "min": 800.0,
493
+ "max": 3400.0
494
+ },
495
+ {
496
+ "current": 2300.0,
497
+ "min": 800.0,
498
+ "max": 3400.0
499
+ },
500
+ {
501
+ "current": 2300.0,
502
+ "min": 800.0,
503
+ "max": 3400.0
504
+ },
505
+ {
506
+ "current": 2300.0,
507
+ "min": 800.0,
508
+ "max": 3400.0
509
+ },
510
+ {
511
+ "current": 2300.0,
512
+ "min": 800.0,
513
+ "max": 3400.0
514
+ },
515
+ {
516
+ "current": 2300.0,
517
+ "min": 800.0,
518
+ "max": 3400.0
519
+ },
520
+ {
521
+ "current": 2300.0,
522
+ "min": 800.0,
523
+ "max": 3400.0
524
+ },
525
+ {
526
+ "current": 2300.0,
527
+ "min": 800.0,
528
+ "max": 3400.0
529
+ },
530
+ {
531
+ "current": 2300.0,
532
+ "min": 800.0,
533
+ "max": 3400.0
534
+ },
535
+ {
536
+ "current": 2300.0,
537
+ "min": 800.0,
538
+ "max": 3400.0
539
+ },
540
+ {
541
+ "current": 2300.0,
542
+ "min": 800.0,
543
+ "max": 3400.0
544
+ },
545
+ {
546
+ "current": 2300.0,
547
+ "min": 800.0,
548
+ "max": 3400.0
549
+ },
550
+ {
551
+ "current": 2300.0,
552
+ "min": 800.0,
553
+ "max": 3400.0
554
+ },
555
+ {
556
+ "current": 2300.0,
557
+ "min": 800.0,
558
+ "max": 3400.0
559
+ },
560
+ {
561
+ "current": 2300.0,
562
+ "min": 800.0,
563
+ "max": 3400.0
564
+ },
565
+ {
566
+ "current": 2300.0,
567
+ "min": 800.0,
568
+ "max": 3400.0
569
+ },
570
+ {
571
+ "current": 2300.0,
572
+ "min": 800.0,
573
+ "max": 3400.0
574
+ },
575
+ {
576
+ "current": 2300.0,
577
+ "min": 800.0,
578
+ "max": 3400.0
579
+ },
580
+ {
581
+ "current": 2300.0,
582
+ "min": 800.0,
583
+ "max": 3400.0
584
+ },
585
+ {
586
+ "current": 2300.0,
587
+ "min": 800.0,
588
+ "max": 3400.0
589
+ },
590
+ {
591
+ "current": 2300.0,
592
+ "min": 800.0,
593
+ "max": 3400.0
594
+ },
595
+ {
596
+ "current": 2300.0,
597
+ "min": 800.0,
598
+ "max": 3400.0
599
+ },
600
+ {
601
+ "current": 2300.0,
602
+ "min": 800.0,
603
+ "max": 3400.0
604
+ },
605
+ {
606
+ "current": 2300.0,
607
+ "min": 800.0,
608
+ "max": 3400.0
609
+ },
610
+ {
611
+ "current": 2300.0,
612
+ "min": 800.0,
613
+ "max": 3400.0
614
+ },
615
+ {
616
+ "current": 2300.0,
617
+ "min": 800.0,
618
+ "max": 3400.0
619
+ },
620
+ {
621
+ "current": 2300.0,
622
+ "min": 800.0,
623
+ "max": 3400.0
624
+ },
625
+ {
626
+ "current": 2300.0,
627
+ "min": 800.0,
628
+ "max": 3400.0
629
+ },
630
+ {
631
+ "current": 2300.0,
632
+ "min": 800.0,
633
+ "max": 3400.0
634
+ },
635
+ {
636
+ "current": 2300.0,
637
+ "min": 800.0,
638
+ "max": 3400.0
639
+ },
640
+ {
641
+ "current": 2300.0,
642
+ "min": 800.0,
643
+ "max": 3400.0
644
+ },
645
+ {
646
+ "current": 2300.0,
647
+ "min": 800.0,
648
+ "max": 3400.0
649
+ },
650
+ {
651
+ "current": 2300.0,
652
+ "min": 800.0,
653
+ "max": 3400.0
654
+ },
655
+ {
656
+ "current": 2300.0,
657
+ "min": 800.0,
658
+ "max": 3400.0
659
+ },
660
+ {
661
+ "current": 2300.0,
662
+ "min": 800.0,
663
+ "max": 3400.0
664
+ },
665
+ {
666
+ "current": 2300.0,
667
+ "min": 800.0,
668
+ "max": 3400.0
669
+ },
670
+ {
671
+ "current": 2300.0,
672
+ "min": 800.0,
673
+ "max": 3400.0
674
+ },
675
+ {
676
+ "current": 2300.0,
677
+ "min": 800.0,
678
+ "max": 3400.0
679
+ },
680
+ {
681
+ "current": 2300.0,
682
+ "min": 800.0,
683
+ "max": 3400.0
684
+ },
685
+ {
686
+ "current": 2300.0,
687
+ "min": 800.0,
688
+ "max": 3400.0
689
+ },
690
+ {
691
+ "current": 2300.0,
692
+ "min": 800.0,
693
+ "max": 3400.0
694
+ },
695
+ {
696
+ "current": 2300.0,
697
+ "min": 800.0,
698
+ "max": 3400.0
699
+ },
700
+ {
701
+ "current": 2300.0,
702
+ "min": 800.0,
703
+ "max": 3400.0
704
+ },
705
+ {
706
+ "current": 2300.0,
707
+ "min": 800.0,
708
+ "max": 3400.0
709
+ },
710
+ {
711
+ "current": 2300.0,
712
+ "min": 800.0,
713
+ "max": 3400.0
714
+ },
715
+ {
716
+ "current": 2300.0,
717
+ "min": 800.0,
718
+ "max": 3400.0
719
+ },
720
+ {
721
+ "current": 2300.0,
722
+ "min": 800.0,
723
+ "max": 3400.0
724
+ },
725
+ {
726
+ "current": 2300.0,
727
+ "min": 800.0,
728
+ "max": 3400.0
729
+ },
730
+ {
731
+ "current": 2300.0,
732
+ "min": 800.0,
733
+ "max": 3400.0
734
+ },
735
+ {
736
+ "current": 2300.0,
737
+ "min": 800.0,
738
+ "max": 3400.0
739
+ },
740
+ {
741
+ "current": 2300.0,
742
+ "min": 800.0,
743
+ "max": 3400.0
744
+ },
745
+ {
746
+ "current": 2300.0,
747
+ "min": 800.0,
748
+ "max": 3400.0
749
+ },
750
+ {
751
+ "current": 2300.0,
752
+ "min": 800.0,
753
+ "max": 3400.0
754
+ },
755
+ {
756
+ "current": 2300.0,
757
+ "min": 800.0,
758
+ "max": 3400.0
759
+ },
760
+ {
761
+ "current": 2300.0,
762
+ "min": 800.0,
763
+ "max": 3400.0
764
+ },
765
+ {
766
+ "current": 2300.0,
767
+ "min": 800.0,
768
+ "max": 3400.0
769
+ },
770
+ {
771
+ "current": 2300.0,
772
+ "min": 800.0,
773
+ "max": 3400.0
774
+ },
775
+ {
776
+ "current": 2300.0,
777
+ "min": 800.0,
778
+ "max": 3400.0
779
+ },
780
+ {
781
+ "current": 2300.0,
782
+ "min": 800.0,
783
+ "max": 3400.0
784
+ },
785
+ {
786
+ "current": 2300.0,
787
+ "min": 800.0,
788
+ "max": 3400.0
789
+ },
790
+ {
791
+ "current": 2300.0,
792
+ "min": 800.0,
793
+ "max": 3400.0
794
+ },
795
+ {
796
+ "current": 2300.0,
797
+ "min": 800.0,
798
+ "max": 3400.0
799
+ },
800
+ {
801
+ "current": 2300.0,
802
+ "min": 800.0,
803
+ "max": 3400.0
804
+ },
805
+ {
806
+ "current": 2300.0,
807
+ "min": 800.0,
808
+ "max": 3400.0
809
+ },
810
+ {
811
+ "current": 2300.0,
812
+ "min": 800.0,
813
+ "max": 3400.0
814
+ },
815
+ {
816
+ "current": 2300.0,
817
+ "min": 800.0,
818
+ "max": 3400.0
819
+ },
820
+ {
821
+ "current": 2300.0,
822
+ "min": 800.0,
823
+ "max": 3400.0
824
+ },
825
+ {
826
+ "current": 2300.0,
827
+ "min": 800.0,
828
+ "max": 3400.0
829
+ },
830
+ {
831
+ "current": 2300.0,
832
+ "min": 800.0,
833
+ "max": 3400.0
834
+ },
835
+ {
836
+ "current": 2300.0,
837
+ "min": 800.0,
838
+ "max": 3400.0
839
+ }
840
+ ],
841
+ "disk": {
842
+ "/": {
843
+ "total": 877.6341285705566,
844
+ "used": 209.58000564575195
845
+ }
846
+ },
847
+ "memory": {
848
+ "total": 1007.4379425048828
849
+ }
850
+ }
lm-evaluation-harness/wandb/run-20240523_123859-6ht0x2b2/files/wandb-summary.json ADDED
@@ -0,0 +1 @@
1
+ {"_wandb": {"runtime": 11}}
lm-evaluation-harness/wandb/run-20240523_123859-6ht0x2b2/logs/debug-internal.log ADDED
@@ -0,0 +1,183 @@
1
+ 2024-05-23 12:38:59,877 INFO StreamThr :4383 [internal.py:wandb_internal():85] W&B internal server running at pid: 4383, started at: 2024-05-23 12:38:59.873996
2
+ 2024-05-23 12:38:59,880 DEBUG HandlerThread:4383 [handler.py:handle_request():158] handle_request: status
3
+ 2024-05-23 12:38:59,881 INFO WriterThread:4383 [datastore.py:open_for_write():87] open: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_123859-6ht0x2b2/run-6ht0x2b2.wandb
4
+ 2024-05-23 12:38:59,883 DEBUG SenderThread:4383 [sender.py:send():378] send: header
5
+ 2024-05-23 12:38:59,887 DEBUG SenderThread:4383 [sender.py:send():378] send: run
6
+ 2024-05-23 12:39:00,183 INFO SenderThread:4383 [dir_watcher.py:__init__():211] watching files in: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_123859-6ht0x2b2/files
7
+ 2024-05-23 12:39:00,183 INFO SenderThread:4383 [sender.py:_start_run_threads():1123] run started: 6ht0x2b2 with start time 1716467939.874055
8
+ 2024-05-23 12:39:00,187 DEBUG HandlerThread:4383 [handler.py:handle_request():158] handle_request: check_version
9
+ 2024-05-23 12:39:00,187 DEBUG SenderThread:4383 [sender.py:send_request():405] send_request: check_version
10
+ 2024-05-23 12:39:00,306 DEBUG HandlerThread:4383 [handler.py:handle_request():158] handle_request: run_start
11
+ 2024-05-23 12:39:00,308 DEBUG HandlerThread:4383 [system_info.py:__init__():26] System info init
12
+ 2024-05-23 12:39:00,308 DEBUG HandlerThread:4383 [system_info.py:__init__():41] System info init done
13
+ 2024-05-23 12:39:00,308 INFO HandlerThread:4383 [system_monitor.py:start():194] Starting system monitor
14
+ 2024-05-23 12:39:00,308 INFO SystemMonitor:4383 [system_monitor.py:_start():158] Starting system asset monitoring threads
15
+ 2024-05-23 12:39:00,308 INFO HandlerThread:4383 [system_monitor.py:probe():214] Collecting system info
16
+ 2024-05-23 12:39:00,315 INFO SystemMonitor:4383 [interfaces.py:start():188] Started cpu monitoring
17
+ 2024-05-23 12:39:00,321 INFO SystemMonitor:4383 [interfaces.py:start():188] Started disk monitoring
18
+ 2024-05-23 12:39:00,321 INFO SystemMonitor:4383 [interfaces.py:start():188] Started memory monitoring
19
+ 2024-05-23 12:39:00,322 INFO SystemMonitor:4383 [interfaces.py:start():188] Started network monitoring
20
+ 2024-05-23 12:39:00,381 DEBUG HandlerThread:4383 [system_info.py:probe():150] Probing system
21
+ 2024-05-23 12:39:00,384 DEBUG HandlerThread:4383 [system_info.py:_probe_git():135] Probing git
22
+ 2024-05-23 12:39:00,394 ERROR HandlerThread:4383 [gitlib.py:root():92] git root error: Cmd('git') failed due to: exit code(128)
23
+ cmdline: git rev-parse --show-toplevel
24
+ stderr: 'fatal: detected dubious ownership in repository at '/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness'
25
+ To add an exception for this directory, call:
26
+
27
+ git config --global --add safe.directory /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness'
28
+ 2024-05-23 12:39:00,395 DEBUG HandlerThread:4383 [system_info.py:_probe_git():143] Probing git done
29
+ 2024-05-23 12:39:00,395 DEBUG HandlerThread:4383 [system_info.py:probe():198] Probing system done
30
+ 2024-05-23 12:39:00,395 DEBUG HandlerThread:4383 [system_monitor.py:probe():223] {'os': 'Linux-5.15.0-92-generic-x86_64-with-glibc2.35', 'python': '3.10.12', 'heartbeatAt': '2024-05-23T12:39:00.381720', 'startedAt': '2024-05-23T12:38:59.852035', 'docker': None, 'cuda': None, 'args': ('--model', 'hf', '--model_args', 'pretrained=/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step6000', '--tasks', 'hellaswag,arc_easy,openbookqa,winogrande,sst2,mrpc', '--batch_size', 'auto', '--wandb_args', 'project=bharatgpt,group=trial_expt_2'), 'state': 'running', 'program': '-m lm_eval.__main__', 'codePathLocal': None, 'git': {'remote': 'https://github.com/EleutherAI/lm-evaluation-harness', 'commit': None}, 'email': None, 'root': '/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness', 'host': 'peacock-evaluation-worker-0', 'username': 'root', 'executable': '/usr/bin/python3', 'cpu_count': 80, 'cpu_count_logical': 160, 'cpu_freq': {'current': 2325.8338875, 'min': 800.0, 'max': 3400.0}, 'cpu_freq_per_core': [{'current': 3400.002, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 
2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 
2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}], 'disk': {'/': {'total': 877.6341285705566, 'used': 209.58000564575195}}, 'memory': {'total': 1007.4379425048828}}
31
+ 2024-05-23 12:39:00,395 INFO HandlerThread:4383 [system_monitor.py:probe():224] Finished collecting system info
32
+ 2024-05-23 12:39:00,395 INFO HandlerThread:4383 [system_monitor.py:probe():227] Publishing system info
33
+ 2024-05-23 12:39:00,398 INFO HandlerThread:4383 [system_monitor.py:probe():229] Finished publishing system info
34
+ 2024-05-23 12:39:00,403 DEBUG SenderThread:4383 [sender.py:send():378] send: files
35
+ 2024-05-23 12:39:00,403 INFO SenderThread:4383 [sender.py:_save_file():1389] saving file wandb-metadata.json with policy now
36
+ 2024-05-23 12:39:00,577 DEBUG HandlerThread:4383 [handler.py:handle_request():158] handle_request: python_packages
37
+ 2024-05-23 12:39:00,577 DEBUG SenderThread:4383 [sender.py:send_request():405] send_request: python_packages
38
+ 2024-05-23 12:39:00,578 DEBUG HandlerThread:4383 [handler.py:handle_request():158] handle_request: stop_status
39
+ 2024-05-23 12:39:00,579 DEBUG SenderThread:4383 [sender.py:send_request():405] send_request: stop_status
40
+ 2024-05-23 12:39:00,686 DEBUG SenderThread:4383 [sender.py:send():378] send: telemetry
41
+ 2024-05-23 12:39:00,997 INFO wandb-upload_0:4383 [upload_job.py:push():130] Uploaded file /tmp/tmp0zw3fovlwandb/ojj5funt-wandb-metadata.json
42
+ 2024-05-23 12:39:01,186 INFO Thread-12 :4383 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_123859-6ht0x2b2/files/wandb-metadata.json
43
+ 2024-05-23 12:39:01,187 INFO Thread-12 :4383 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_123859-6ht0x2b2/files/output.log
44
+ 2024-05-23 12:39:01,187 INFO Thread-12 :4383 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_123859-6ht0x2b2/files/requirements.txt
45
+ 2024-05-23 12:39:03,186 INFO Thread-12 :4383 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_123859-6ht0x2b2/files/output.log
46
+ 2024-05-23 12:39:05,689 DEBUG HandlerThread:4383 [handler.py:handle_request():158] handle_request: status_report
47
+ 2024-05-23 12:39:11,194 INFO Thread-12 :4383 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_123859-6ht0x2b2/files/output.log
48
+ 2024-05-23 12:39:11,236 DEBUG HandlerThread:4383 [handler.py:handle_request():158] handle_request: status_report
49
+ 2024-05-23 12:39:11,786 DEBUG SenderThread:4383 [sender.py:send():378] send: exit
50
+ 2024-05-23 12:39:11,786 INFO SenderThread:4383 [sender.py:send_exit():585] handling exit code: 1
51
+ 2024-05-23 12:39:11,786 INFO SenderThread:4383 [sender.py:send_exit():587] handling runtime: 11
52
+ 2024-05-23 12:39:11,788 INFO SenderThread:4383 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end
53
+ 2024-05-23 12:39:11,788 INFO SenderThread:4383 [sender.py:send_exit():593] send defer
54
+ 2024-05-23 12:39:11,788 DEBUG HandlerThread:4383 [handler.py:handle_request():158] handle_request: defer
55
+ 2024-05-23 12:39:11,788 INFO HandlerThread:4383 [handler.py:handle_request_defer():184] handle defer: 0
56
+ 2024-05-23 12:39:11,788 DEBUG SenderThread:4383 [sender.py:send_request():405] send_request: defer
57
+ 2024-05-23 12:39:11,788 INFO SenderThread:4383 [sender.py:send_request_defer():609] handle sender defer: 0
58
+ 2024-05-23 12:39:11,788 INFO SenderThread:4383 [sender.py:transition_state():613] send defer: 1
59
+ 2024-05-23 12:39:11,788 DEBUG HandlerThread:4383 [handler.py:handle_request():158] handle_request: defer
60
+ 2024-05-23 12:39:11,788 INFO HandlerThread:4383 [handler.py:handle_request_defer():184] handle defer: 1
61
+ 2024-05-23 12:39:11,788 DEBUG SenderThread:4383 [sender.py:send_request():405] send_request: defer
62
+ 2024-05-23 12:39:11,788 INFO SenderThread:4383 [sender.py:send_request_defer():609] handle sender defer: 1
63
+ 2024-05-23 12:39:11,788 INFO SenderThread:4383 [sender.py:transition_state():613] send defer: 2
64
+ 2024-05-23 12:39:11,789 DEBUG HandlerThread:4383 [handler.py:handle_request():158] handle_request: defer
65
+ 2024-05-23 12:39:11,789 INFO HandlerThread:4383 [handler.py:handle_request_defer():184] handle defer: 2
66
+ 2024-05-23 12:39:11,789 INFO HandlerThread:4383 [system_monitor.py:finish():203] Stopping system monitor
67
+ 2024-05-23 12:39:11,789 DEBUG SystemMonitor:4383 [system_monitor.py:_start():172] Starting system metrics aggregation loop
68
+ 2024-05-23 12:39:11,789 DEBUG SystemMonitor:4383 [system_monitor.py:_start():179] Finished system metrics aggregation loop
69
+ 2024-05-23 12:39:11,789 INFO HandlerThread:4383 [interfaces.py:finish():200] Joined cpu monitor
70
+ 2024-05-23 12:39:11,789 DEBUG SystemMonitor:4383 [system_monitor.py:_start():183] Publishing last batch of metrics
71
+ 2024-05-23 12:39:11,790 INFO HandlerThread:4383 [interfaces.py:finish():200] Joined disk monitor
72
+ 2024-05-23 12:39:11,791 INFO HandlerThread:4383 [interfaces.py:finish():200] Joined memory monitor
73
+ 2024-05-23 12:39:11,791 INFO HandlerThread:4383 [interfaces.py:finish():200] Joined network monitor
74
+ 2024-05-23 12:39:11,792 DEBUG SenderThread:4383 [sender.py:send_request():405] send_request: defer
75
+ 2024-05-23 12:39:11,792 INFO SenderThread:4383 [sender.py:send_request_defer():609] handle sender defer: 2
76
+ 2024-05-23 12:39:11,792 INFO SenderThread:4383 [sender.py:transition_state():613] send defer: 3
77
+ 2024-05-23 12:39:11,792 DEBUG SenderThread:4383 [sender.py:send():378] send: stats
78
+ 2024-05-23 12:39:11,793 DEBUG HandlerThread:4383 [handler.py:handle_request():158] handle_request: defer
79
+ 2024-05-23 12:39:11,793 INFO HandlerThread:4383 [handler.py:handle_request_defer():184] handle defer: 3
80
+ 2024-05-23 12:39:11,793 DEBUG SenderThread:4383 [sender.py:send_request():405] send_request: defer
81
+ 2024-05-23 12:39:11,793 INFO SenderThread:4383 [sender.py:send_request_defer():609] handle sender defer: 3
82
+ 2024-05-23 12:39:11,793 INFO SenderThread:4383 [sender.py:transition_state():613] send defer: 4
83
+ 2024-05-23 12:39:11,793 DEBUG HandlerThread:4383 [handler.py:handle_request():158] handle_request: defer
84
+ 2024-05-23 12:39:11,793 INFO HandlerThread:4383 [handler.py:handle_request_defer():184] handle defer: 4
85
+ 2024-05-23 12:39:11,793 DEBUG SenderThread:4383 [sender.py:send_request():405] send_request: defer
86
+ 2024-05-23 12:39:11,793 INFO SenderThread:4383 [sender.py:send_request_defer():609] handle sender defer: 4
87
+ 2024-05-23 12:39:11,793 INFO SenderThread:4383 [sender.py:transition_state():613] send defer: 5
88
+ 2024-05-23 12:39:11,793 DEBUG HandlerThread:4383 [handler.py:handle_request():158] handle_request: defer
89
+ 2024-05-23 12:39:11,793 INFO HandlerThread:4383 [handler.py:handle_request_defer():184] handle defer: 5
90
+ 2024-05-23 12:39:11,794 DEBUG SenderThread:4383 [sender.py:send():378] send: summary
91
+ 2024-05-23 12:39:11,794 INFO SenderThread:4383 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end
92
+ 2024-05-23 12:39:11,795 DEBUG SenderThread:4383 [sender.py:send_request():405] send_request: defer
93
+ 2024-05-23 12:39:11,795 INFO SenderThread:4383 [sender.py:send_request_defer():609] handle sender defer: 5
94
+ 2024-05-23 12:39:11,795 INFO SenderThread:4383 [sender.py:transition_state():613] send defer: 6
95
+ 2024-05-23 12:39:11,795 DEBUG HandlerThread:4383 [handler.py:handle_request():158] handle_request: defer
96
+ 2024-05-23 12:39:11,795 INFO HandlerThread:4383 [handler.py:handle_request_defer():184] handle defer: 6
97
+ 2024-05-23 12:39:11,795 DEBUG SenderThread:4383 [sender.py:send_request():405] send_request: defer
98
+ 2024-05-23 12:39:11,795 INFO SenderThread:4383 [sender.py:send_request_defer():609] handle sender defer: 6
99
+ 2024-05-23 12:39:11,799 DEBUG HandlerThread:4383 [handler.py:handle_request():158] handle_request: status_report
100
+ 2024-05-23 12:39:11,882 INFO SenderThread:4383 [sender.py:transition_state():613] send defer: 7
101
+ 2024-05-23 12:39:11,882 DEBUG HandlerThread:4383 [handler.py:handle_request():158] handle_request: defer
102
+ 2024-05-23 12:39:11,882 INFO HandlerThread:4383 [handler.py:handle_request_defer():184] handle defer: 7
103
+ 2024-05-23 12:39:11,882 DEBUG SenderThread:4383 [sender.py:send_request():405] send_request: defer
104
+ 2024-05-23 12:39:11,882 INFO SenderThread:4383 [sender.py:send_request_defer():609] handle sender defer: 7
105
+ 2024-05-23 12:39:12,196 INFO Thread-12 :4383 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_123859-6ht0x2b2/files/config.yaml
106
+ 2024-05-23 12:39:12,196 INFO Thread-12 :4383 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_123859-6ht0x2b2/files/wandb-summary.json
107
+ 2024-05-23 12:39:12,710 INFO SenderThread:4383 [sender.py:transition_state():613] send defer: 8
108
+ 2024-05-23 12:39:12,710 DEBUG HandlerThread:4383 [handler.py:handle_request():158] handle_request: defer
109
+ 2024-05-23 12:39:12,710 INFO HandlerThread:4383 [handler.py:handle_request_defer():184] handle defer: 8
110
+ 2024-05-23 12:39:12,711 DEBUG SenderThread:4383 [sender.py:send_request():405] send_request: defer
111
+ 2024-05-23 12:39:12,711 INFO SenderThread:4383 [sender.py:send_request_defer():609] handle sender defer: 8
112
+ 2024-05-23 12:39:12,711 INFO SenderThread:4383 [job_builder.py:build():432] Attempting to build job artifact
113
+ 2024-05-23 12:39:12,711 INFO SenderThread:4383 [job_builder.py:_get_source_type():576] no source found
114
+ 2024-05-23 12:39:12,711 INFO SenderThread:4383 [sender.py:transition_state():613] send defer: 9
115
+ 2024-05-23 12:39:12,711 DEBUG HandlerThread:4383 [handler.py:handle_request():158] handle_request: defer
116
+ 2024-05-23 12:39:12,711 INFO HandlerThread:4383 [handler.py:handle_request_defer():184] handle defer: 9
117
+ 2024-05-23 12:39:12,712 DEBUG SenderThread:4383 [sender.py:send_request():405] send_request: defer
118
+ 2024-05-23 12:39:12,712 INFO SenderThread:4383 [sender.py:send_request_defer():609] handle sender defer: 9
119
+ 2024-05-23 12:39:12,712 INFO SenderThread:4383 [dir_watcher.py:finish():358] shutting down directory watcher
120
+ 2024-05-23 12:39:12,786 DEBUG HandlerThread:4383 [handler.py:handle_request():158] handle_request: poll_exit
121
+ 2024-05-23 12:39:13,197 INFO SenderThread:4383 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_123859-6ht0x2b2/files/output.log
122
+ 2024-05-23 12:39:13,198 INFO SenderThread:4383 [dir_watcher.py:finish():388] scan: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_123859-6ht0x2b2/files
123
+ 2024-05-23 12:39:13,198 INFO SenderThread:4383 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_123859-6ht0x2b2/files/requirements.txt requirements.txt
124
+ 2024-05-23 12:39:13,198 INFO SenderThread:4383 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_123859-6ht0x2b2/files/config.yaml config.yaml
125
+ 2024-05-23 12:39:13,200 INFO SenderThread:4383 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_123859-6ht0x2b2/files/wandb-metadata.json wandb-metadata.json
126
+ 2024-05-23 12:39:13,201 INFO SenderThread:4383 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_123859-6ht0x2b2/files/wandb-summary.json wandb-summary.json
127
+ 2024-05-23 12:39:13,201 INFO SenderThread:4383 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_123859-6ht0x2b2/files/output.log output.log
128
+ 2024-05-23 12:39:13,201 INFO SenderThread:4383 [sender.py:transition_state():613] send defer: 10
129
+ 2024-05-23 12:39:13,201 DEBUG SenderThread:4383 [sender.py:send_request():405] send_request: poll_exit
130
+ 2024-05-23 12:39:13,201 DEBUG HandlerThread:4383 [handler.py:handle_request():158] handle_request: defer
131
+ 2024-05-23 12:39:13,201 INFO HandlerThread:4383 [handler.py:handle_request_defer():184] handle defer: 10
132
+ 2024-05-23 12:39:13,203 DEBUG SenderThread:4383 [sender.py:send_request():405] send_request: defer
133
+ 2024-05-23 12:39:13,203 INFO SenderThread:4383 [sender.py:send_request_defer():609] handle sender defer: 10
134
+ 2024-05-23 12:39:13,203 INFO SenderThread:4383 [file_pusher.py:finish():169] shutting down file pusher
135
+ 2024-05-23 12:39:13,444 INFO wandb-upload_0:4383 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_123859-6ht0x2b2/files/requirements.txt
136
+ 2024-05-23 12:39:13,786 DEBUG HandlerThread:4383 [handler.py:handle_request():158] handle_request: poll_exit
137
+ 2024-05-23 12:39:13,786 DEBUG SenderThread:4383 [sender.py:send_request():405] send_request: poll_exit
138
+ 2024-05-23 12:39:13,824 INFO wandb-upload_2:4383 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_123859-6ht0x2b2/files/wandb-summary.json
139
+ 2024-05-23 12:39:14,077 INFO wandb-upload_1:4383 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_123859-6ht0x2b2/files/config.yaml
140
+ 2024-05-23 12:39:14,084 INFO wandb-upload_3:4383 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_123859-6ht0x2b2/files/output.log
141
+ 2024-05-23 12:39:14,284 INFO Thread-11 (_thread_body):4383 [sender.py:transition_state():613] send defer: 11
142
+ 2024-05-23 12:39:14,284 DEBUG HandlerThread:4383 [handler.py:handle_request():158] handle_request: defer
143
+ 2024-05-23 12:39:14,284 INFO HandlerThread:4383 [handler.py:handle_request_defer():184] handle defer: 11
144
+ 2024-05-23 12:39:14,284 DEBUG SenderThread:4383 [sender.py:send_request():405] send_request: defer
145
+ 2024-05-23 12:39:14,284 INFO SenderThread:4383 [sender.py:send_request_defer():609] handle sender defer: 11
146
+ 2024-05-23 12:39:14,284 INFO SenderThread:4383 [file_pusher.py:join():175] waiting for file pusher
147
+ 2024-05-23 12:39:14,285 INFO SenderThread:4383 [sender.py:transition_state():613] send defer: 12
148
+ 2024-05-23 12:39:14,285 DEBUG HandlerThread:4383 [handler.py:handle_request():158] handle_request: defer
149
+ 2024-05-23 12:39:14,285 INFO HandlerThread:4383 [handler.py:handle_request_defer():184] handle defer: 12
150
+ 2024-05-23 12:39:14,285 DEBUG SenderThread:4383 [sender.py:send_request():405] send_request: defer
151
+ 2024-05-23 12:39:14,285 INFO SenderThread:4383 [sender.py:send_request_defer():609] handle sender defer: 12
152
+ 2024-05-23 12:39:14,285 INFO SenderThread:4383 [file_stream.py:finish():601] file stream finish called
153
+ 2024-05-23 12:39:14,360 INFO SenderThread:4383 [file_stream.py:finish():605] file stream finish is done
154
+ 2024-05-23 12:39:14,360 INFO SenderThread:4383 [sender.py:transition_state():613] send defer: 13
155
+ 2024-05-23 12:39:14,360 DEBUG HandlerThread:4383 [handler.py:handle_request():158] handle_request: defer
156
+ 2024-05-23 12:39:14,360 INFO HandlerThread:4383 [handler.py:handle_request_defer():184] handle defer: 13
157
+ 2024-05-23 12:39:14,360 DEBUG SenderThread:4383 [sender.py:send_request():405] send_request: defer
158
+ 2024-05-23 12:39:14,360 INFO SenderThread:4383 [sender.py:send_request_defer():609] handle sender defer: 13
159
+ 2024-05-23 12:39:14,360 INFO SenderThread:4383 [sender.py:transition_state():613] send defer: 14
160
+ 2024-05-23 12:39:14,360 DEBUG HandlerThread:4383 [handler.py:handle_request():158] handle_request: defer
161
+ 2024-05-23 12:39:14,360 INFO HandlerThread:4383 [handler.py:handle_request_defer():184] handle defer: 14
162
+ 2024-05-23 12:39:14,361 DEBUG SenderThread:4383 [sender.py:send():378] send: final
163
+ 2024-05-23 12:39:14,361 DEBUG SenderThread:4383 [sender.py:send():378] send: footer
164
+ 2024-05-23 12:39:14,361 DEBUG SenderThread:4383 [sender.py:send_request():405] send_request: defer
165
+ 2024-05-23 12:39:14,361 INFO SenderThread:4383 [sender.py:send_request_defer():609] handle sender defer: 14
166
+ 2024-05-23 12:39:14,361 DEBUG HandlerThread:4383 [handler.py:handle_request():158] handle_request: poll_exit
167
+ 2024-05-23 12:39:14,362 DEBUG HandlerThread:4383 [handler.py:handle_request():158] handle_request: poll_exit
168
+ 2024-05-23 12:39:14,362 DEBUG HandlerThread:4383 [handler.py:handle_request():158] handle_request: server_info
169
+ 2024-05-23 12:39:14,362 DEBUG HandlerThread:4383 [handler.py:handle_request():158] handle_request: get_summary
170
+ 2024-05-23 12:39:14,362 DEBUG HandlerThread:4383 [handler.py:handle_request():158] handle_request: sampled_history
171
+ 2024-05-23 12:39:14,362 DEBUG HandlerThread:4383 [handler.py:handle_request():158] handle_request: internal_messages
172
+ 2024-05-23 12:39:14,362 DEBUG SenderThread:4383 [sender.py:send_request():405] send_request: poll_exit
173
+ 2024-05-23 12:39:14,362 DEBUG SenderThread:4383 [sender.py:send_request():405] send_request: poll_exit
174
+ 2024-05-23 12:39:14,362 DEBUG SenderThread:4383 [sender.py:send_request():405] send_request: server_info
175
+ 2024-05-23 12:39:14,414 INFO MainThread:4383 [wandb_run.py:_footer_history_summary_info():3994] rendering history
176
+ 2024-05-23 12:39:14,414 INFO MainThread:4383 [wandb_run.py:_footer_history_summary_info():4026] rendering summary
177
+ 2024-05-23 12:39:14,414 INFO MainThread:4383 [wandb_run.py:_footer_sync_info():3953] logging synced files
178
+ 2024-05-23 12:39:14,414 DEBUG HandlerThread:4383 [handler.py:handle_request():158] handle_request: shutdown
179
+ 2024-05-23 12:39:14,414 INFO HandlerThread:4383 [handler.py:finish():882] shutting down handler
180
+ 2024-05-23 12:39:15,362 INFO WriterThread:4383 [datastore.py:close():296] close: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_123859-6ht0x2b2/run-6ht0x2b2.wandb
181
+ 2024-05-23 12:39:15,414 INFO SenderThread:4383 [sender.py:finish():1545] shutting down sender
182
+ 2024-05-23 12:39:15,414 INFO SenderThread:4383 [file_pusher.py:finish():169] shutting down file pusher
183
+ 2024-05-23 12:39:15,414 INFO SenderThread:4383 [file_pusher.py:join():175] waiting for file pusher
lm-evaluation-harness/wandb/run-20240523_123859-6ht0x2b2/logs/debug.log ADDED
@@ -0,0 +1,29 @@
1
+ 2024-05-23 12:38:59,869 INFO MainThread:4228 [wandb_setup.py:_flush():76] Current SDK version is 0.17.0
2
+ 2024-05-23 12:38:59,869 INFO MainThread:4228 [wandb_setup.py:_flush():76] Configure stats pid to 4228
3
+ 2024-05-23 12:38:59,869 INFO MainThread:4228 [wandb_setup.py:_flush():76] Loading settings from /root/.config/wandb/settings
4
+ 2024-05-23 12:38:59,869 INFO MainThread:4228 [wandb_setup.py:_flush():76] Loading settings from /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/settings
5
+ 2024-05-23 12:38:59,869 INFO MainThread:4228 [wandb_setup.py:_flush():76] Loading settings from environment variables: {}
6
+ 2024-05-23 12:38:59,869 INFO MainThread:4228 [wandb_setup.py:_flush():76] Applying setup settings: {'_disable_service': False}
7
+ 2024-05-23 12:38:59,869 WARNING MainThread:4228 [wandb_setup.py:_flush():76] Could not find program at -m lm_eval.__main__
8
+ 2024-05-23 12:38:59,869 INFO MainThread:4228 [wandb_setup.py:_flush():76] Inferring run settings from compute environment: {'program_relpath': None, 'program': '-m lm_eval.__main__'}
9
+ 2024-05-23 12:38:59,869 INFO MainThread:4228 [wandb_setup.py:_flush():76] Applying login settings: {}
10
+ 2024-05-23 12:38:59,869 INFO MainThread:4228 [wandb_init.py:_log_setup():520] Logging user logs to /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_123859-6ht0x2b2/logs/debug.log
11
+ 2024-05-23 12:38:59,869 INFO MainThread:4228 [wandb_init.py:_log_setup():521] Logging internal logs to /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_123859-6ht0x2b2/logs/debug-internal.log
12
+ 2024-05-23 12:38:59,869 INFO MainThread:4228 [wandb_init.py:init():560] calling init triggers
13
+ 2024-05-23 12:38:59,869 INFO MainThread:4228 [wandb_init.py:init():567] wandb.init called with sweep_config: {}
14
+ config: {}
15
+ 2024-05-23 12:38:59,869 INFO MainThread:4228 [wandb_init.py:init():610] starting backend
16
+ 2024-05-23 12:38:59,869 INFO MainThread:4228 [wandb_init.py:init():614] setting up manager
17
+ 2024-05-23 12:38:59,872 INFO MainThread:4228 [backend.py:_multiprocessing_setup():105] multiprocessing start_methods=fork,spawn,forkserver, using: spawn
18
+ 2024-05-23 12:38:59,873 INFO MainThread:4228 [wandb_init.py:init():622] backend started and connected
19
+ 2024-05-23 12:38:59,877 INFO MainThread:4228 [wandb_init.py:init():711] updated telemetry
20
+ 2024-05-23 12:38:59,886 INFO MainThread:4228 [wandb_init.py:init():744] communicating run to backend with 90.0 second timeout
21
+ 2024-05-23 12:39:00,186 INFO MainThread:4228 [wandb_run.py:_on_init():2396] communicating current version
22
+ 2024-05-23 12:39:00,299 INFO MainThread:4228 [wandb_run.py:_on_init():2405] got version response
23
+ 2024-05-23 12:39:00,299 INFO MainThread:4228 [wandb_init.py:init():795] starting run threads in backend
24
+ 2024-05-23 12:39:00,578 INFO MainThread:4228 [wandb_run.py:_console_start():2374] atexit reg
25
+ 2024-05-23 12:39:00,578 INFO MainThread:4228 [wandb_run.py:_redirect():2229] redirect: wrap_raw
26
+ 2024-05-23 12:39:00,578 INFO MainThread:4228 [wandb_run.py:_redirect():2294] Wrapping output streams.
27
+ 2024-05-23 12:39:00,578 INFO MainThread:4228 [wandb_run.py:_redirect():2319] Redirects installed.
28
+ 2024-05-23 12:39:00,581 INFO MainThread:4228 [wandb_init.py:init():838] run started, returning control to user process
29
+ 2024-05-23 12:39:15,416 WARNING MsgRouterThr:4228 [router.py:message_loop():77] message_loop has been closed
lm-evaluation-harness/wandb/run-20240523_123859-6ht0x2b2/run-6ht0x2b2.wandb ADDED
Binary file (11.2 kB).
 
lm-evaluation-harness/wandb/run-20240608_122925-vmbmpokf/run-vmbmpokf.wandb ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d6505aee9b094c1fb265311bb8aac60a155eb9fea8c9dc21dd2cf85edd6aafba
3
+ size 1293291
venv/lib/python3.10/site-packages/portalocker/__about__.py ADDED
@@ -0,0 +1,6 @@
1
+ __package_name__ = 'portalocker'
2
+ __author__ = 'Rick van Hattem'
3
+ __email__ = '[email protected]'
4
+ __version__ = '2.8.2'
5
+ __description__ = '''Wraps the portalocker recipe for easy usage'''
6
+ __url__ = 'https://github.com/WoLpH/portalocker'
venv/lib/python3.10/site-packages/portalocker/__init__.py ADDED
@@ -0,0 +1,76 @@
1
+ from . import __about__, constants, exceptions, portalocker, utils
2
+
3
+ try: # pragma: no cover
4
+ from .redis import RedisLock
5
+ except ImportError: # pragma: no cover
6
+ RedisLock = None # type: ignore
7
+
8
+
9
+ #: The package name on Pypi
10
+ __package_name__ = __about__.__package_name__
11
+ #: Current author and maintainer, view the git history for the previous ones
12
+ __author__ = __about__.__author__
13
+ #: Current author's email address
14
+ __email__ = __about__.__email__
15
+ #: Version number
16
+ __version__ = '2.8.2'
17
+ #: Package description for Pypi
18
+ __description__ = __about__.__description__
19
+ #: Package homepage
20
+ __url__ = __about__.__url__
21
+
22
+
23
+ #: Exception thrown when the file is already locked by someone else
24
+ AlreadyLocked = exceptions.AlreadyLocked
25
+ #: Exception thrown if an error occurred during locking
26
+ LockException = exceptions.LockException
27
+
28
+
29
+ #: Lock a file. Note that this is an advisory lock on Linux/Unix systems
30
+ lock = portalocker.lock
31
+ #: Unlock a file
32
+ unlock = portalocker.unlock
33
+
34
+ #: Place an exclusive lock.
35
+ #: Only one process may hold an exclusive lock for a given file at a given
36
+ #: time.
37
+ LOCK_EX: constants.LockFlags = constants.LockFlags.EXCLUSIVE
38
+
39
+ #: Place a shared lock.
40
+ #: More than one process may hold a shared lock for a given file at a given
41
+ #: time.
42
+ LOCK_SH: constants.LockFlags = constants.LockFlags.SHARED
43
+
44
+ #: Acquire the lock in a non-blocking fashion.
45
+ LOCK_NB: constants.LockFlags = constants.LockFlags.NON_BLOCKING
46
+
47
+ #: Remove an existing lock held by this process.
48
+ LOCK_UN: constants.LockFlags = constants.LockFlags.UNBLOCK
49
+
50
+ #: Locking flags enum
51
+ LockFlags = constants.LockFlags
52
+
53
+ #: Locking utility class to automatically handle opening with timeouts and
54
+ #: context wrappers
55
+ Lock = utils.Lock
56
+ RLock = utils.RLock
57
+ BoundedSemaphore = utils.BoundedSemaphore
58
+ TemporaryFileLock = utils.TemporaryFileLock
59
+ open_atomic = utils.open_atomic
60
+
61
+ __all__ = [
62
+ 'lock',
63
+ 'unlock',
64
+ 'LOCK_EX',
65
+ 'LOCK_SH',
66
+ 'LOCK_NB',
67
+ 'LOCK_UN',
68
+ 'LockFlags',
69
+ 'LockException',
70
+ 'Lock',
71
+ 'RLock',
72
+ 'AlreadyLocked',
73
+ 'BoundedSemaphore',
74
+ 'open_atomic',
75
+ 'RedisLock',
76
+ ]
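A minimal usage sketch for the public interface that the portalocker/__init__.py above exports. This is illustrative only and not part of the commit; the file name 'example.txt' and the 5-second timeout are assumed placeholders.

import portalocker

# portalocker.Lock (utils.Lock re-exported above) opens the file, retries an
# exclusive lock until the timeout expires, and releases it when the block exits.
try:
    with portalocker.Lock('example.txt', mode='a', timeout=5) as fh:  # hypothetical path/timeout
        fh.write('written while holding the lock\n')
        fh.flush()
except portalocker.LockException:
    print('could not acquire the lock within 5 seconds')

Catching LockException also covers AlreadyLocked, which subclasses it (see exceptions.py further down).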
venv/lib/python3.10/site-packages/portalocker/__main__.py ADDED
@@ -0,0 +1,98 @@
1
+ import argparse
2
+ import logging
3
+ import os
4
+ import pathlib
5
+ import re
6
+
7
+ base_path = pathlib.Path(__file__).parent.parent
8
+ src_path = base_path / 'portalocker'
9
+ dist_path = base_path / 'dist'
10
+ _default_output_path = base_path / 'dist' / 'portalocker.py'
11
+
12
+ _RELATIVE_IMPORT_RE = re.compile(r'^from \. import (?P<names>.+)$')
13
+ _USELESS_ASSIGNMENT_RE = re.compile(r'^(?P<name>\w+) = \1\n$')
14
+
15
+ _TEXT_TEMPLATE = """'''
16
+ {}
17
+ '''
18
+
19
+ """
20
+
21
+ logger = logging.getLogger(__name__)
22
+
23
+
24
+ def main(argv=None):
25
+ parser = argparse.ArgumentParser()
26
+
27
+ subparsers = parser.add_subparsers(required=True)
28
+ combine_parser = subparsers.add_parser(
29
+ 'combine',
30
+ help='Combine all Python files into a single unified `portalocker.py` '
31
+ 'file for easy distribution',
32
+ )
33
+ combine_parser.add_argument(
34
+ '--output-file',
35
+ '-o',
36
+ type=argparse.FileType('w'),
37
+ default=str(_default_output_path),
38
+ )
39
+
40
+ combine_parser.set_defaults(func=combine)
41
+ args = parser.parse_args(argv)
42
+ args.func(args)
43
+
44
+
45
+ def _read_file(path, seen_files):
46
+ if path in seen_files:
47
+ return
48
+
49
+ names = set()
50
+ seen_files.add(path)
51
+ for line in path.open():
52
+ if match := _RELATIVE_IMPORT_RE.match(line):
53
+ for name in match.group('names').split(','):
54
+ name = name.strip()
55
+ names.add(name)
56
+ yield from _read_file(src_path / f'{name}.py', seen_files)
57
+ else:
58
+ yield _clean_line(line, names)
59
+
60
+
61
+ def _clean_line(line, names):
62
+ # Replace `some_import.spam` with `spam`
63
+ if names:
64
+ joined_names = '|'.join(names)
65
+ line = re.sub(fr'\b({joined_names})\.', '', line)
66
+
67
+ # Replace useless assignments (e.g. `spam = spam`)
68
+ return _USELESS_ASSIGNMENT_RE.sub('', line)
69
+
70
+
71
+ def combine(args):
72
+ output_file = args.output_file
73
+ pathlib.Path(output_file.name).parent.mkdir(parents=True, exist_ok=True)
74
+
75
+ output_file.write(
76
+ _TEXT_TEMPLATE.format((base_path / 'README.rst').read_text()),
77
+ )
78
+ output_file.write(
79
+ _TEXT_TEMPLATE.format((base_path / 'LICENSE').read_text()),
80
+ )
81
+
82
+ seen_files = set()
83
+ for line in _read_file(src_path / '__init__.py', seen_files):
84
+ output_file.write(line)
85
+
86
+ output_file.flush()
87
+ output_file.close()
88
+
89
+ logger.info(f'Wrote combined file to {output_file.name}')
90
+ # Run black and ruff if available. If not then just run the file.
91
+ os.system(f'black {output_file.name}')
92
+ os.system(f'ruff --fix {output_file.name}')
93
+ os.system(f'python3 {output_file.name}')
94
+
95
+
96
+ if __name__ == '__main__':
97
+ logging.basicConfig(level=logging.INFO)
98
+ main()
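An assumed invocation sketch for the combine helper defined in __main__.py above. It presumes a source checkout where README.rst and LICENSE sit next to the package (combine() reads both), and the output file name is a hypothetical choice; nothing here is exercised by this commit.

from portalocker.__main__ import main

# Equivalent to `python3 -m portalocker combine -o portalocker_single_file.py`:
# argparse routes the 'combine' subcommand to combine(), which inlines the
# package's relative imports into one distributable module.
main(['combine', '--output-file', 'portalocker_single_file.py'])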
venv/lib/python3.10/site-packages/portalocker/__pycache__/__about__.cpython-310.pyc ADDED
Binary file (425 Bytes).
 
venv/lib/python3.10/site-packages/portalocker/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (999 Bytes).
 
venv/lib/python3.10/site-packages/portalocker/__pycache__/__main__.cpython-310.pyc ADDED
Binary file (2.45 kB).
 
venv/lib/python3.10/site-packages/portalocker/__pycache__/constants.cpython-310.pyc ADDED
Binary file (948 Bytes).
 
venv/lib/python3.10/site-packages/portalocker/__pycache__/exceptions.cpython-310.pyc ADDED
Binary file (1.07 kB).
 
venv/lib/python3.10/site-packages/portalocker/__pycache__/portalocker.cpython-310.pyc ADDED
Binary file (2.5 kB).
 
venv/lib/python3.10/site-packages/portalocker/__pycache__/redis.cpython-310.pyc ADDED
Binary file (6.63 kB).
 
venv/lib/python3.10/site-packages/portalocker/__pycache__/utils.cpython-310.pyc ADDED
Binary file (15.5 kB).
 
venv/lib/python3.10/site-packages/portalocker/constants.py ADDED
@@ -0,0 +1,58 @@
1
+ '''
2
+ Locking constants
3
+
4
+ Lock types:
5
+
6
+ - `EXCLUSIVE` exclusive lock
7
+ - `SHARED` shared lock
8
+
9
+ Lock flags:
10
+
11
+ - `NON_BLOCKING` non-blocking
12
+
13
+ Manually unlock, only needed internally
14
+
15
+ - `UNBLOCK` unlock
16
+ '''
17
+ import enum
18
+ import os
19
+
20
+ # The actual tests will execute the code anyhow so the following code can
21
+ # safely be ignored from the coverage tests
22
+ if os.name == 'nt': # pragma: no cover
23
+ import msvcrt
24
+
25
+ #: exclusive lock
26
+ LOCK_EX = 0x1
27
+ #: shared lock
28
+ LOCK_SH = 0x2
29
+ #: non-blocking
30
+ LOCK_NB = 0x4
31
+ #: unlock
32
+ LOCK_UN = msvcrt.LK_UNLCK # type: ignore
33
+
34
+ elif os.name == 'posix': # pragma: no cover
35
+ import fcntl
36
+
37
+ #: exclusive lock
38
+ LOCK_EX = fcntl.LOCK_EX
39
+ #: shared lock
40
+ LOCK_SH = fcntl.LOCK_SH
41
+ #: non-blocking
42
+ LOCK_NB = fcntl.LOCK_NB
43
+ #: unlock
44
+ LOCK_UN = fcntl.LOCK_UN
45
+
46
+ else: # pragma: no cover
47
+ raise RuntimeError('PortaLocker only defined for nt and posix platforms')
48
+
49
+
50
+ class LockFlags(enum.IntFlag):
51
+ #: exclusive lock
52
+ EXCLUSIVE = LOCK_EX
53
+ #: shared lock
54
+ SHARED = LOCK_SH
55
+ #: non-blocking
56
+ NON_BLOCKING = LOCK_NB
57
+ #: unlock
58
+ UNBLOCK = LOCK_UN
venv/lib/python3.10/site-packages/portalocker/exceptions.py ADDED
@@ -0,0 +1,27 @@
1
+ import typing
2
+
3
+
4
+ class BaseLockException(Exception): # noqa: N818
5
+ # Error codes:
6
+ LOCK_FAILED = 1
7
+
8
+ def __init__(
9
+ self,
10
+ *args: typing.Any,
11
+ fh: typing.Union[typing.IO, None, int] = None,
12
+ **kwargs: typing.Any,
13
+ ) -> None:
14
+ self.fh = fh
15
+ Exception.__init__(self, *args)
16
+
17
+
18
+ class LockException(BaseLockException):
19
+ pass
20
+
21
+
22
+ class AlreadyLocked(LockException):
23
+ pass
24
+
25
+
26
+ class FileToLarge(LockException):
27
+ pass
venv/lib/python3.10/site-packages/portalocker/portalocker.py ADDED
@@ -0,0 +1,117 @@
1
+ import contextlib
2
+ import os
3
+ import typing
4
+
5
+ from . import constants, exceptions
6
+
7
+ # Alias for readability. Due to import recursion issues we cannot do:
8
+ # from .constants import LockFlags
9
+ LockFlags = constants.LockFlags
10
+
11
+
12
+ if os.name == 'nt': # pragma: no cover
13
+ import msvcrt
14
+
15
+ import pywintypes
16
+ import win32con
17
+ import win32file
18
+ import winerror
19
+
20
+ __overlapped = pywintypes.OVERLAPPED()
21
+
22
+ def lock(file_: typing.Union[typing.IO, int], flags: LockFlags):
23
+ # Windows locking does not support locking through `fh.fileno()` so
24
+ # we cast it to make mypy and pyright happy
25
+ file_ = typing.cast(typing.IO, file_)
26
+
27
+ mode = 0
28
+ if flags & LockFlags.NON_BLOCKING:
29
+ mode |= win32con.LOCKFILE_FAIL_IMMEDIATELY
30
+
31
+ if flags & LockFlags.EXCLUSIVE:
32
+ mode |= win32con.LOCKFILE_EXCLUSIVE_LOCK
33
+
34
+ # Save the old position so we can go back to that position but
35
+ # still lock from the beginning of the file
36
+ savepos = file_.tell()
37
+ if savepos:
38
+ file_.seek(0)
39
+
40
+ os_fh = msvcrt.get_osfhandle(file_.fileno()) # type: ignore
41
+ try:
42
+ win32file.LockFileEx(os_fh, mode, 0, -0x10000, __overlapped)
43
+ except pywintypes.error as exc_value:
44
+ # error: (33, 'LockFileEx', 'The process cannot access the file
45
+ # because another process has locked a portion of the file.')
46
+ if exc_value.winerror == winerror.ERROR_LOCK_VIOLATION:
47
+ raise exceptions.AlreadyLocked(
48
+ exceptions.LockException.LOCK_FAILED,
49
+ exc_value.strerror,
50
+ fh=file_,
51
+ ) from exc_value
52
+ else:
53
+ # Q: Are there exceptions/codes we should be dealing with
54
+ # here?
55
+ raise
56
+ finally:
57
+ if savepos:
58
+ file_.seek(savepos)
59
+
60
+ def unlock(file_: typing.IO):
61
+ try:
62
+ savepos = file_.tell()
63
+ if savepos:
64
+ file_.seek(0)
65
+
66
+ os_fh = msvcrt.get_osfhandle(file_.fileno()) # type: ignore
67
+ try:
68
+ win32file.UnlockFileEx(
69
+ os_fh,
70
+ 0,
71
+ -0x10000,
72
+ __overlapped,
73
+ )
74
+ except pywintypes.error as exc:
75
+ if exc.winerror != winerror.ERROR_NOT_LOCKED:
76
+ # Q: Are there exceptions/codes we should be
77
+ # dealing with here?
78
+ raise
79
+ finally:
80
+ if savepos:
81
+ file_.seek(savepos)
82
+ except OSError as exc:
83
+ raise exceptions.LockException(
84
+ exceptions.LockException.LOCK_FAILED,
85
+ exc.strerror,
86
+ fh=file_,
87
+ ) from exc
88
+
89
+ elif os.name == 'posix': # pragma: no cover
90
+ import fcntl
91
+
92
+ def lock(file_: typing.Union[typing.IO, int], flags: LockFlags):
93
+ locking_exceptions = (IOError,)
94
+ with contextlib.suppress(NameError):
95
+ locking_exceptions += (BlockingIOError,) # type: ignore
96
+ # Locking with NON_BLOCKING without EXCLUSIVE or SHARED enabled results
97
+ # in an error
98
+ if (flags & LockFlags.NON_BLOCKING) and not flags & (
99
+ LockFlags.SHARED | LockFlags.EXCLUSIVE
100
+ ):
101
+ raise RuntimeError(
102
+ 'When locking in non-blocking mode the SHARED '
103
+ 'or EXCLUSIVE flag must be specified as well',
104
+ )
105
+
106
+ try:
107
+ fcntl.flock(file_, flags)
108
+ except locking_exceptions as exc_value:
109
+ # The exception code varies on different systems so we'll catch
110
+ # every IO error
111
+ raise exceptions.LockException(exc_value, fh=file_) from exc_value
112
+
113
+ def unlock(file_: typing.IO):
114
+ fcntl.flock(file_.fileno(), LockFlags.UNBLOCK)
115
+
116
+ else: # pragma: no cover
117
+ raise RuntimeError('PortaLocker only defined for nt and posix platforms')
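A short, assumed sketch of the low-level lock()/unlock() pair defined in portalocker.py above, mirroring its posix branch: an EXCLUSIVE | NON_BLOCKING request either succeeds or is re-raised as a LockException, and NON_BLOCKING on its own is rejected with a RuntimeError before fcntl.flock is ever called. The file name 'example.lock' is a made-up placeholder.

from portalocker import constants, exceptions
from portalocker import portalocker as lowlevel

with open('example.lock', 'a') as fh:
    try:
        # Fails immediately instead of blocking if another process holds the lock.
        lowlevel.lock(
            fh,
            constants.LockFlags.EXCLUSIVE | constants.LockFlags.NON_BLOCKING,
        )
    except exceptions.LockException:
        print('already locked by another process')
    else:
        try:
            fh.write('critical section\n')
        finally:
            lowlevel.unlock(fh)

# NON_BLOCKING without EXCLUSIVE or SHARED is rejected up front:
# lowlevel.lock(fh, constants.LockFlags.NON_BLOCKING)  -> RuntimeError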
venv/lib/python3.10/site-packages/portalocker/py.typed ADDED
File without changes
venv/lib/python3.10/site-packages/portalocker/redis.py ADDED
@@ -0,0 +1,236 @@
1
+ import _thread
2
+ import json
3
+ import logging
4
+ import random
5
+ import time
6
+ import typing
7
+
8
+ from redis import client
9
+
10
+ from . import exceptions, utils
11
+
12
+ logger = logging.getLogger(__name__)
13
+
14
+ DEFAULT_UNAVAILABLE_TIMEOUT = 1
15
+ DEFAULT_THREAD_SLEEP_TIME = 0.1
16
+
17
+
18
+ class PubSubWorkerThread(client.PubSubWorkerThread): # type: ignore
19
+ def run(self):
20
+ try:
21
+ super().run()
22
+ except Exception: # pragma: no cover
23
+ _thread.interrupt_main()
24
+ raise
25
+
26
+
27
+ class RedisLock(utils.LockBase):
28
+ '''
29
+ An extremely reliable Redis lock based on pubsub with a keep-alive thread
30
+
31
+ As opposed to most Redis locking systems based on key/value pairs,
32
+ this locking method is based on the pubsub system. The big advantage is
33
+ that if the connection gets killed due to network issues, crashing
34
+ processes or otherwise, it will still immediately unlock instead of
35
+ waiting for a lock timeout.
36
+
37
+ To make sure both sides of the lock know about the connection state it is
38
+ recommended to set the `health_check_interval` when creating the redis
39
+ connection.
40
+
41
+ Args:
42
+ channel: the redis channel to use as locking key.
43
+ connection: an optional redis connection if you already have one
44
+ or if you need to specify the redis connection
45
+ timeout: timeout when trying to acquire a lock
46
+ check_interval: check interval while waiting
47
+ fail_when_locked: after the initial lock failed, return an error
48
+ or lock the file. This does not wait for the timeout.
49
+ thread_sleep_time: sleep time between fetching messages from redis to
50
+ prevent a busy/wait loop. In the case of lock conflicts this
51
+ increases the time it takes to resolve the conflict. This should
52
+ be smaller than the `check_interval` to be useful.
53
+ unavailable_timeout: If the conflicting lock is properly connected
54
+ this should never exceed twice your redis latency. Note that this
55
+ will increase the wait time possibly beyond your `timeout` and is
56
+ always executed if a conflict arises.
57
+ redis_kwargs: The redis connection arguments if no connection is
58
+ given. The `DEFAULT_REDIS_KWARGS` are used as default, if you want
59
+ to override these you need to explicitly specify a value (e.g.
60
+ `health_check_interval=0`)
61
+
62
+ '''
63
+
64
+ redis_kwargs: typing.Dict[str, typing.Any]
65
+ thread: typing.Optional[PubSubWorkerThread]
66
+ channel: str
67
+ timeout: float
68
+ connection: typing.Optional[client.Redis]
69
+ pubsub: typing.Optional[client.PubSub] = None
70
+ close_connection: bool
71
+
72
+ DEFAULT_REDIS_KWARGS: typing.ClassVar[typing.Dict[str, typing.Any]] = dict(
73
+ health_check_interval=10,
74
+ )
75
+
76
+ def __init__(
77
+ self,
78
+ channel: str,
79
+ connection: typing.Optional[client.Redis] = None,
80
+ timeout: typing.Optional[float] = None,
81
+ check_interval: typing.Optional[float] = None,
82
+ fail_when_locked: typing.Optional[bool] = False,
83
+ thread_sleep_time: float = DEFAULT_THREAD_SLEEP_TIME,
84
+ unavailable_timeout: float = DEFAULT_UNAVAILABLE_TIMEOUT,
85
+ redis_kwargs: typing.Optional[typing.Dict] = None,
86
+ ):
87
+ # We don't want to close connections given as an argument
88
+ self.close_connection = not connection
89
+
90
+ self.thread = None
91
+ self.channel = channel
92
+ self.connection = connection
93
+ self.thread_sleep_time = thread_sleep_time
94
+ self.unavailable_timeout = unavailable_timeout
95
+ self.redis_kwargs = redis_kwargs or dict()
96
+
97
+ for key, value in self.DEFAULT_REDIS_KWARGS.items():
98
+ self.redis_kwargs.setdefault(key, value)
99
+
100
+ super().__init__(
101
+ timeout=timeout,
102
+ check_interval=check_interval,
103
+ fail_when_locked=fail_when_locked,
104
+ )
105
+
106
+ def get_connection(self) -> client.Redis:
107
+ if not self.connection:
108
+ self.connection = client.Redis(**self.redis_kwargs)
109
+
110
+ return self.connection
111
+
112
+ def channel_handler(self, message):
113
+ if message.get('type') != 'message': # pragma: no cover
114
+ return
115
+
116
+ try:
117
+ data = json.loads(message.get('data'))
118
+ except TypeError: # pragma: no cover
119
+ logger.debug('TypeError while parsing: %r', message)
120
+ return
121
+
122
+ assert self.connection is not None
123
+ self.connection.publish(data['response_channel'], str(time.time()))
124
+
125
+ @property
126
+ def client_name(self):
127
+ return f'{self.channel}-lock'
128
+
129
+ def acquire(
130
+ self,
131
+ timeout: typing.Optional[float] = None,
132
+ check_interval: typing.Optional[float] = None,
133
+ fail_when_locked: typing.Optional[bool] = None,
134
+ ):
135
+ timeout = utils.coalesce(timeout, self.timeout, 0.0)
136
+ check_interval = utils.coalesce(
137
+ check_interval,
138
+ self.check_interval,
139
+ 0.0,
140
+ )
141
+ fail_when_locked = utils.coalesce(
142
+ fail_when_locked,
143
+ self.fail_when_locked,
144
+ )
145
+
146
+ assert not self.pubsub, 'This lock is already active'
147
+ connection = self.get_connection()
148
+
149
+ timeout_generator = self._timeout_generator(timeout, check_interval)
150
+ for _ in timeout_generator: # pragma: no branch
151
+ subscribers = connection.pubsub_numsub(self.channel)[0][1]
152
+
153
+ if subscribers:
154
+ logger.debug(
155
+ 'Found %d lock subscribers for %s',
156
+ subscribers,
157
+ self.channel,
158
+ )
159
+
160
+ if self.check_or_kill_lock(
161
+ connection,
162
+ self.unavailable_timeout,
163
+ ): # pragma: no branch
164
+ continue
165
+ else: # pragma: no cover
166
+ subscribers = 0
167
+
168
+ # Note: this should not be changed to an elif because the if
169
+ # above can still end up here
170
+ if not subscribers:
171
+ connection.client_setname(self.client_name)
172
+ self.pubsub = connection.pubsub()
173
+ self.pubsub.subscribe(**{self.channel: self.channel_handler})
174
+ self.thread = PubSubWorkerThread(
175
+ self.pubsub,
176
+ sleep_time=self.thread_sleep_time,
177
+ )
178
+ self.thread.start()
179
+
180
+ subscribers = connection.pubsub_numsub(self.channel)[0][1]
181
+ if subscribers == 1: # pragma: no branch
182
+ return self
183
+ else: # pragma: no cover
184
+ # Race condition, let's try again
185
+ self.release()
186
+
187
+ if fail_when_locked: # pragma: no cover
188
+ raise exceptions.AlreadyLocked(exceptions)
189
+
190
+ raise exceptions.AlreadyLocked(exceptions)
191
+
192
+ def check_or_kill_lock(self, connection, timeout):
193
+ # Random channel name to get messages back from the lock
194
+ response_channel = f'{self.channel}-{random.random()}'
195
+
196
+ pubsub = connection.pubsub()
197
+ pubsub.subscribe(response_channel)
198
+ connection.publish(
199
+ self.channel,
200
+ json.dumps(
201
+ dict(
202
+ response_channel=response_channel,
203
+ message='ping',
204
+ ),
205
+ ),
206
+ )
207
+
208
+ check_interval = min(self.thread_sleep_time, timeout / 10)
209
+ for _ in self._timeout_generator(
210
+ timeout,
211
+ check_interval,
212
+ ): # pragma: no branch
213
+ if pubsub.get_message(timeout=check_interval):
214
+ pubsub.close()
215
+ return True
216
+
217
+ for client_ in connection.client_list('pubsub'): # pragma: no cover
218
+ if client_.get('name') == self.client_name:
219
+ logger.warning('Killing unavailable redis client: %r', client_)
220
+ connection.client_kill_filter(client_.get('id'))
221
+ return None
222
+
223
+ def release(self):
224
+ if self.thread: # pragma: no branch
225
+ self.thread.stop()
226
+ self.thread.join()
227
+ self.thread = None
228
+ time.sleep(0.01)
229
+
230
+ if self.pubsub: # pragma: no branch
231
+ self.pubsub.unsubscribe(self.channel)
232
+ self.pubsub.close()
233
+ self.pubsub = None
234
+
235
+ def __del__(self):
236
+ self.release()
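Because RedisLock inherits the __enter__/__exit__ methods defined on LockBase in utils.py, it can be used as a context manager. A sketch under the assumption that a Redis server is reachable with the default connection settings and that 'example-channel' is a placeholder channel name:

    from portalocker.redis import RedisLock

    # One subscriber per channel means one lock holder; other processes
    # using the same channel name wait until it is free or they time out.
    lock = RedisLock('example-channel', timeout=10)

    with lock:
        print('holding the example-channel lock')
    # Leaving the block calls release(), which stops the keep-alive worker
    # thread and unsubscribes from the channel.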
venv/lib/python3.10/site-packages/portalocker/utils.py ADDED
@@ -0,0 +1,563 @@
1
+ import abc
2
+ import atexit
3
+ import contextlib
4
+ import logging
5
+ import os
6
+ import pathlib
7
+ import random
8
+ import tempfile
9
+ import time
10
+ import typing
11
+ import warnings
12
+
13
+ from . import constants, exceptions, portalocker
14
+
15
+ logger = logging.getLogger(__name__)
16
+
17
+ DEFAULT_TIMEOUT = 5
18
+ DEFAULT_CHECK_INTERVAL = 0.25
19
+ DEFAULT_FAIL_WHEN_LOCKED = False
20
+ LOCK_METHOD = constants.LockFlags.EXCLUSIVE | constants.LockFlags.NON_BLOCKING
21
+
22
+ __all__ = [
23
+ 'Lock',
24
+ 'open_atomic',
25
+ ]
26
+
27
+ Filename = typing.Union[str, pathlib.Path]
28
+
29
+
30
+ def coalesce(*args: typing.Any, test_value: typing.Any = None) -> typing.Any:
31
+ '''Simple coalescing function that returns the first value that is not
32
+ equal to the `test_value`. Or `None` if no value is valid. Usually this
33
+ means that the last given value is the default value.
34
+
35
+ Note that the `test_value` is compared using an identity check
36
+ (i.e. `value is not test_value`) so changing the `test_value` won't work
37
+ for all values.
38
+
39
+ >>> coalesce(None, 1)
40
+ 1
41
+ >>> coalesce()
42
+
43
+ >>> coalesce(0, False, True)
44
+ 0
45
+ >>> coalesce(0, False, True, test_value=0)
46
+ False
47
+
48
+ # This won't work because of the `is not test_value` type testing:
49
+ >>> coalesce([], dict(spam='eggs'), test_value=[])
50
+ []
51
+ '''
52
+ return next((arg for arg in args if arg is not test_value), None)
53
+
54
+
55
+ @contextlib.contextmanager
56
+ def open_atomic(
57
+ filename: Filename,
58
+ binary: bool = True,
59
+ ) -> typing.Iterator[typing.IO]:
60
+ '''Open a file for atomic writing. Instead of locking this method allows
61
+ you to write the entire file and move it to the actual location. Note that
62
+ this makes the assumption that a rename is atomic on your platform which
63
+ is generally the case but not a guarantee.
64
+
65
+ http://docs.python.org/library/os.html#os.rename
66
+
67
+ >>> filename = 'test_file.txt'
68
+ >>> if os.path.exists(filename):
69
+ ... os.remove(filename)
70
+
71
+ >>> with open_atomic(filename) as fh:
72
+ ... written = fh.write(b'test')
73
+ >>> assert os.path.exists(filename)
74
+ >>> os.remove(filename)
75
+
76
+ >>> import pathlib
77
+ >>> path_filename = pathlib.Path('test_file.txt')
78
+
79
+ >>> with open_atomic(path_filename) as fh:
80
+ ... written = fh.write(b'test')
81
+ >>> assert path_filename.exists()
82
+ >>> path_filename.unlink()
83
+ '''
84
+ # `pathlib.Path` cast in case `path` is a `str`
85
+ path: pathlib.Path = pathlib.Path(filename)
86
+
87
+ assert not path.exists(), '%r exists' % path
88
+
89
+ # Create the parent directory if it doesn't exist
90
+ path.parent.mkdir(parents=True, exist_ok=True)
91
+
92
+ temp_fh = tempfile.NamedTemporaryFile(
93
+ mode=binary and 'wb' or 'w',
94
+ dir=str(path.parent),
95
+ delete=False,
96
+ )
97
+ yield temp_fh
98
+ temp_fh.flush()
99
+ os.fsync(temp_fh.fileno())
100
+ temp_fh.close()
101
+ try:
102
+ os.rename(temp_fh.name, path)
103
+ finally:
104
+ with contextlib.suppress(Exception):
105
+ os.remove(temp_fh.name)
106
+
107
+
108
+ class LockBase(abc.ABC): # pragma: no cover
109
+ #: timeout when trying to acquire a lock
110
+ timeout: float
111
+ #: check interval while waiting for `timeout`
112
+ check_interval: float
113
+ #: skip the timeout and immediately fail if the initial lock fails
114
+ fail_when_locked: bool
115
+
116
+ def __init__(
117
+ self,
118
+ timeout: typing.Optional[float] = None,
119
+ check_interval: typing.Optional[float] = None,
120
+ fail_when_locked: typing.Optional[bool] = None,
121
+ ):
122
+ self.timeout = coalesce(timeout, DEFAULT_TIMEOUT)
123
+ self.check_interval = coalesce(check_interval, DEFAULT_CHECK_INTERVAL)
124
+ self.fail_when_locked = coalesce(
125
+ fail_when_locked,
126
+ DEFAULT_FAIL_WHEN_LOCKED,
127
+ )
128
+
129
+ @abc.abstractmethod
130
+ def acquire(
131
+ self,
132
+ timeout: typing.Optional[float] = None,
133
+ check_interval: typing.Optional[float] = None,
134
+ fail_when_locked: typing.Optional[bool] = None,
135
+ ):
136
+ return NotImplemented
137
+
138
+ def _timeout_generator(
139
+ self,
140
+ timeout: typing.Optional[float],
141
+ check_interval: typing.Optional[float],
142
+ ) -> typing.Iterator[int]:
143
+ f_timeout = coalesce(timeout, self.timeout, 0.0)
144
+ f_check_interval = coalesce(check_interval, self.check_interval, 0.0)
145
+
146
+ yield 0
147
+ i = 0
148
+
149
+ start_time = time.perf_counter()
150
+ while start_time + f_timeout > time.perf_counter():
151
+ i += 1
152
+ yield i
153
+
154
+ # Take low lock checks into account to stay within the interval
155
+ since_start_time = time.perf_counter() - start_time
156
+ time.sleep(max(0.001, (i * f_check_interval) - since_start_time))
157
+
158
+ @abc.abstractmethod
159
+ def release(self):
160
+ return NotImplemented
161
+
162
+ def __enter__(self):
163
+ return self.acquire()
164
+
165
+ def __exit__(
166
+ self,
167
+ exc_type: typing.Optional[typing.Type[BaseException]],
168
+ exc_value: typing.Optional[BaseException],
169
+ traceback: typing.Any, # Should be typing.TracebackType
170
+ ) -> typing.Optional[bool]:
171
+ self.release()
172
+ return None
173
+
174
+ def __delete__(self, instance):
175
+ instance.release()
176
+
177
+
178
+ class Lock(LockBase):
179
+ '''Lock manager with built-in timeout
180
+
181
+ Args:
182
+ filename: filename
183
+ mode: the open mode, 'a' or 'ab' should be used for writing. When mode
184
+ contains `w` the file will be truncated to 0 bytes.
185
+ timeout: timeout when trying to acquire a lock
186
+ check_interval: check interval while waiting
187
+ fail_when_locked: after the initial lock failed, return an error
188
+ or lock the file. This does not wait for the timeout.
189
+ **file_open_kwargs: The kwargs for the `open(...)` call
190
+
191
+ fail_when_locked is useful when multiple threads/processes can race
192
+ when creating a file. If set to true, a failed initial lock attempt will
193
+ immediately raise an AlreadyLocked exception instead of waiting for the timeout.
194
+
195
+ Note that the file is opened first and locked later; with 'w' in the mode
196
+ the file is opened in append mode instead and only truncated after the lock is acquired.
197
+ '''
198
+
199
+ def __init__(
200
+ self,
201
+ filename: Filename,
202
+ mode: str = 'a',
203
+ timeout: typing.Optional[float] = None,
204
+ check_interval: float = DEFAULT_CHECK_INTERVAL,
205
+ fail_when_locked: bool = DEFAULT_FAIL_WHEN_LOCKED,
206
+ flags: constants.LockFlags = LOCK_METHOD,
207
+ **file_open_kwargs,
208
+ ):
209
+ if 'w' in mode:
210
+ truncate = True
211
+ mode = mode.replace('w', 'a')
212
+ else:
213
+ truncate = False
214
+
215
+ if timeout is None:
216
+ timeout = DEFAULT_TIMEOUT
217
+ elif not (flags & constants.LockFlags.NON_BLOCKING):
218
+ warnings.warn(
219
+ 'timeout has no effect in blocking mode',
220
+ stacklevel=1,
221
+ )
222
+
223
+ self.fh: typing.Optional[typing.IO] = None
224
+ self.filename: str = str(filename)
225
+ self.mode: str = mode
226
+ self.truncate: bool = truncate
227
+ self.timeout: float = timeout
228
+ self.check_interval: float = check_interval
229
+ self.fail_when_locked: bool = fail_when_locked
230
+ self.flags: constants.LockFlags = flags
231
+ self.file_open_kwargs = file_open_kwargs
232
+
233
+ def acquire(
234
+ self,
235
+ timeout: typing.Optional[float] = None,
236
+ check_interval: typing.Optional[float] = None,
237
+ fail_when_locked: typing.Optional[bool] = None,
238
+ ) -> typing.IO:
239
+ '''Acquire the locked filehandle'''
240
+
241
+ fail_when_locked = coalesce(fail_when_locked, self.fail_when_locked)
242
+
243
+ if (
244
+ not (self.flags & constants.LockFlags.NON_BLOCKING)
245
+ and timeout is not None
246
+ ):
247
+ warnings.warn(
248
+ 'timeout has no effect in blocking mode',
249
+ stacklevel=1,
250
+ )
251
+
252
+ # If we already have a filehandle, return it
253
+ fh: typing.Optional[typing.IO] = self.fh
254
+ if fh:
255
+ return fh
256
+
257
+ # Get a new filehandler
258
+ fh = self._get_fh()
259
+
260
+ def try_close(): # pragma: no cover
261
+ # Silently try to close the handle if possible, ignore all issues
262
+ if fh is not None:
263
+ with contextlib.suppress(Exception):
264
+ fh.close()
265
+
266
+ exception = None
267
+ # Try till the timeout has passed
268
+ for _ in self._timeout_generator(timeout, check_interval):
269
+ exception = None
270
+ try:
271
+ # Try to lock
272
+ fh = self._get_lock(fh)
273
+ break
274
+ except exceptions.LockException as exc:
275
+ # Python will automatically remove the variable from memory
276
+ # unless you save it in a different location
277
+ exception = exc
278
+
279
+ # We already tried to get the lock
280
+ # If fail_when_locked is True, stop trying
281
+ if fail_when_locked:
282
+ try_close()
283
+ raise exceptions.AlreadyLocked(exception) from exc
284
+
285
+ # Wait a bit
286
+
287
+ if exception:
288
+ try_close()
289
+ # We got a timeout... reraising
290
+ raise exceptions.LockException(exception)
291
+
292
+ # Prepare the filehandle (truncate if needed)
293
+ fh = self._prepare_fh(fh)
294
+
295
+ self.fh = fh
296
+ return fh
297
+
298
+ def release(self):
299
+ '''Releases the currently locked file handle'''
300
+ if self.fh:
301
+ portalocker.unlock(self.fh)
302
+ self.fh.close()
303
+ self.fh = None
304
+
305
+ def _get_fh(self) -> typing.IO:
306
+ '''Get a new filehandle'''
307
+ return open( # noqa: SIM115
308
+ self.filename,
309
+ self.mode,
310
+ **self.file_open_kwargs,
311
+ )
312
+
313
+ def _get_lock(self, fh: typing.IO) -> typing.IO:
314
+ '''
315
+ Try to lock the given filehandle
316
+
317
+ returns LockException if it fails'''
318
+ portalocker.lock(fh, self.flags)
319
+ return fh
320
+
321
+ def _prepare_fh(self, fh: typing.IO) -> typing.IO:
322
+ '''
323
+ Prepare the filehandle for usage
324
+
325
+ If truncate is set, the file will be truncated to 0
326
+ bytes
327
+ '''
328
+ if self.truncate:
329
+ fh.seek(0)
330
+ fh.truncate(0)
331
+
332
+ return fh
333
+
334
+
335
+ class RLock(Lock):
336
+ '''
337
+ A reentrant lock, functions in a similar way to threading.RLock in that it
338
+ can be acquired multiple times. When the corresponding number of release()
339
+ calls are made the lock will finally release the underlying file lock.
340
+ '''
341
+
342
+ def __init__(
343
+ self,
344
+ filename,
345
+ mode='a',
346
+ timeout=DEFAULT_TIMEOUT,
347
+ check_interval=DEFAULT_CHECK_INTERVAL,
348
+ fail_when_locked=False,
349
+ flags=LOCK_METHOD,
350
+ ):
351
+ super().__init__(
352
+ filename,
353
+ mode,
354
+ timeout,
355
+ check_interval,
356
+ fail_when_locked,
357
+ flags,
358
+ )
359
+ self._acquire_count = 0
360
+
361
+ def acquire(
362
+ self,
363
+ timeout: typing.Optional[float] = None,
364
+ check_interval: typing.Optional[float] = None,
365
+ fail_when_locked: typing.Optional[bool] = None,
366
+ ) -> typing.IO:
367
+ if self._acquire_count >= 1:
368
+ fh = self.fh
369
+ else:
370
+ fh = super().acquire(timeout, check_interval, fail_when_locked)
371
+ self._acquire_count += 1
372
+ assert fh
373
+ return fh
374
+
375
+ def release(self):
376
+ if self._acquire_count == 0:
377
+ raise exceptions.LockException(
378
+ 'Cannot release more times than acquired',
379
+ )
380
+
381
+ if self._acquire_count == 1:
382
+ super().release()
383
+ self._acquire_count -= 1
384
+
385
+
386
+ class TemporaryFileLock(Lock):
387
+ def __init__(
388
+ self,
389
+ filename='.lock',
390
+ timeout=DEFAULT_TIMEOUT,
391
+ check_interval=DEFAULT_CHECK_INTERVAL,
392
+ fail_when_locked=True,
393
+ flags=LOCK_METHOD,
394
+ ):
395
+ Lock.__init__(
396
+ self,
397
+ filename=filename,
398
+ mode='w',
399
+ timeout=timeout,
400
+ check_interval=check_interval,
401
+ fail_when_locked=fail_when_locked,
402
+ flags=flags,
403
+ )
404
+ atexit.register(self.release)
405
+
406
+ def release(self):
407
+ Lock.release(self)
408
+ if os.path.isfile(self.filename): # pragma: no branch
409
+ os.unlink(self.filename)
410
+
411
+
412
+ class BoundedSemaphore(LockBase):
413
+ '''
414
+ Bounded semaphore to prevent too many parallel processes from running
415
+
416
+ This method is deprecated because multiple processes that are completely
417
+ unrelated could end up using the same semaphore. To prevent this,
418
+ use `NamedBoundedSemaphore` instead. The
419
+ `NamedBoundedSemaphore` is a drop-in replacement for this class.
420
+
421
+ >>> semaphore = BoundedSemaphore(2, directory='')
422
+ >>> str(semaphore.get_filenames()[0])
423
+ 'bounded_semaphore.00.lock'
424
+ >>> str(sorted(semaphore.get_random_filenames())[1])
425
+ 'bounded_semaphore.01.lock'
426
+ '''
427
+
428
+ lock: typing.Optional[Lock]
429
+
430
+ def __init__(
431
+ self,
432
+ maximum: int,
433
+ name: str = 'bounded_semaphore',
434
+ filename_pattern: str = '{name}.{number:02d}.lock',
435
+ directory: str = tempfile.gettempdir(),
436
+ timeout: typing.Optional[float] = DEFAULT_TIMEOUT,
437
+ check_interval: typing.Optional[float] = DEFAULT_CHECK_INTERVAL,
438
+ fail_when_locked: typing.Optional[bool] = True,
439
+ ):
440
+ self.maximum = maximum
441
+ self.name = name
442
+ self.filename_pattern = filename_pattern
443
+ self.directory = directory
444
+ self.lock: typing.Optional[Lock] = None
445
+ super().__init__(
446
+ timeout=timeout,
447
+ check_interval=check_interval,
448
+ fail_when_locked=fail_when_locked,
449
+ )
450
+
451
+ if not name or name == 'bounded_semaphore':
452
+ warnings.warn(
453
+ '`BoundedSemaphore` without an explicit `name` '
454
+ 'argument is deprecated, use NamedBoundedSemaphore',
455
+ DeprecationWarning,
456
+ stacklevel=1,
457
+ )
458
+
459
+ def get_filenames(self) -> typing.Sequence[pathlib.Path]:
460
+ return [self.get_filename(n) for n in range(self.maximum)]
461
+
462
+ def get_random_filenames(self) -> typing.Sequence[pathlib.Path]:
463
+ filenames = list(self.get_filenames())
464
+ random.shuffle(filenames)
465
+ return filenames
466
+
467
+ def get_filename(self, number) -> pathlib.Path:
468
+ return pathlib.Path(self.directory) / self.filename_pattern.format(
469
+ name=self.name,
470
+ number=number,
471
+ )
472
+
473
+ def acquire(
474
+ self,
475
+ timeout: typing.Optional[float] = None,
476
+ check_interval: typing.Optional[float] = None,
477
+ fail_when_locked: typing.Optional[bool] = None,
478
+ ) -> typing.Optional[Lock]:
479
+ assert not self.lock, 'Already locked'
480
+
481
+ filenames = self.get_filenames()
482
+
483
+ for n in self._timeout_generator(timeout, check_interval): # pragma:
484
+ logger.debug('trying lock (attempt %d) %r', n, filenames)
485
+ # no branch
486
+ if self.try_lock(filenames): # pragma: no branch
487
+ return self.lock # pragma: no cover
488
+
489
+ if fail_when_locked := coalesce(
490
+ fail_when_locked,
491
+ self.fail_when_locked,
492
+ ):
493
+ raise exceptions.AlreadyLocked()
494
+
495
+ return None
496
+
497
+ def try_lock(self, filenames: typing.Sequence[Filename]) -> bool:
498
+ filename: Filename
499
+ for filename in filenames:
500
+ logger.debug('trying lock for %r', filename)
501
+ self.lock = Lock(filename, fail_when_locked=True)
502
+ try:
503
+ self.lock.acquire()
504
+ except exceptions.AlreadyLocked:
505
+ self.lock = None
506
+ else:
507
+ logger.debug('locked %r', filename)
508
+ return True
509
+
510
+ return False
511
+
512
+ def release(self): # pragma: no cover
513
+ if self.lock is not None:
514
+ self.lock.release()
515
+ self.lock = None
516
+
517
+
518
+ class NamedBoundedSemaphore(BoundedSemaphore):
519
+ '''
520
+ Bounded semaphore to prevent too many parallel processes from running
521
+
522
+ It's also possible to specify a timeout when acquiring the lock to wait
523
+ for a resource to become available. This is very similar to
524
+ `threading.BoundedSemaphore` but works across multiple processes and across
525
+ multiple operating systems.
526
+
527
+ Because this works across multiple processes it's important to give the
528
+ semaphore a name. This name is used to create the lock files. If you
529
+ don't specify a name, a random name will be generated. This means that
530
+ you can't use the same semaphore in multiple processes unless you pass the
531
+ semaphore object to the other processes.
532
+
533
+ >>> semaphore = NamedBoundedSemaphore(2, name='test')
534
+ >>> str(semaphore.get_filenames()[0])
535
+ '...test.00.lock'
536
+
537
+ >>> semaphore = NamedBoundedSemaphore(2)
538
+ >>> 'bounded_semaphore' in str(semaphore.get_filenames()[0])
539
+ True
540
+
541
+ '''
542
+
543
+ def __init__(
544
+ self,
545
+ maximum: int,
546
+ name: typing.Optional[str] = None,
547
+ filename_pattern: str = '{name}.{number:02d}.lock',
548
+ directory: str = tempfile.gettempdir(),
549
+ timeout: typing.Optional[float] = DEFAULT_TIMEOUT,
550
+ check_interval: typing.Optional[float] = DEFAULT_CHECK_INTERVAL,
551
+ fail_when_locked: typing.Optional[bool] = True,
552
+ ):
553
+ if name is None:
554
+ name = 'bounded_semaphore.%d' % random.randint(0, 1000000)
555
+ super().__init__(
556
+ maximum,
557
+ name,
558
+ filename_pattern,
559
+ directory,
560
+ timeout,
561
+ check_interval,
562
+ fail_when_locked,
563
+ )
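A short sketch of the two most commonly used helpers defined above, Lock and NamedBoundedSemaphore; the path 'results.txt' and the name 'sketch' are placeholders:

    from portalocker.utils import Lock, NamedBoundedSemaphore

    # Exclusive, non-blocking file lock with the defaults defined above
    # (5 s timeout, 0.25 s check interval). The context manager yields the
    # locked file handle, opened in append mode.
    with Lock('results.txt', timeout=5) as fh:
        fh.write('one writer at a time\n')

    # At most two processes may hold a slot at once; the name determines
    # the lock files ('sketch.00.lock' and 'sketch.01.lock' in the temp dir).
    semaphore = NamedBoundedSemaphore(2, name='sketch')
    try:
        semaphore.acquire()  # raises AlreadyLocked when both slots are taken
        print('holding one of the two semaphore slots')
    finally:
        semaphore.release()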
venv/lib/python3.10/site-packages/sacrebleu-2.4.2.dist-info/INSTALLER ADDED
@@ -0,0 +1 @@
1
+ pip
venv/lib/python3.10/site-packages/sacrebleu-2.4.2.dist-info/entry_points.txt ADDED
@@ -0,0 +1,2 @@
1
+ [console_scripts]
2
+ sacrebleu = sacrebleu.sacrebleu:main
venv/lib/python3.10/site-packages/yaml/__init__.py ADDED
@@ -0,0 +1,390 @@
1
+
2
+ from .error import *
3
+
4
+ from .tokens import *
5
+ from .events import *
6
+ from .nodes import *
7
+
8
+ from .loader import *
9
+ from .dumper import *
10
+
11
+ __version__ = '6.0.1'
12
+ try:
13
+ from .cyaml import *
14
+ __with_libyaml__ = True
15
+ except ImportError:
16
+ __with_libyaml__ = False
17
+
18
+ import io
19
+
20
+ #------------------------------------------------------------------------------
21
+ # XXX "Warnings control" is now deprecated. Leaving in the API function to not
22
+ # break code that uses it.
23
+ #------------------------------------------------------------------------------
24
+ def warnings(settings=None):
25
+ if settings is None:
26
+ return {}
27
+
28
+ #------------------------------------------------------------------------------
29
+ def scan(stream, Loader=Loader):
30
+ """
31
+ Scan a YAML stream and produce scanning tokens.
32
+ """
33
+ loader = Loader(stream)
34
+ try:
35
+ while loader.check_token():
36
+ yield loader.get_token()
37
+ finally:
38
+ loader.dispose()
39
+
40
+ def parse(stream, Loader=Loader):
41
+ """
42
+ Parse a YAML stream and produce parsing events.
43
+ """
44
+ loader = Loader(stream)
45
+ try:
46
+ while loader.check_event():
47
+ yield loader.get_event()
48
+ finally:
49
+ loader.dispose()
50
+
51
+ def compose(stream, Loader=Loader):
52
+ """
53
+ Parse the first YAML document in a stream
54
+ and produce the corresponding representation tree.
55
+ """
56
+ loader = Loader(stream)
57
+ try:
58
+ return loader.get_single_node()
59
+ finally:
60
+ loader.dispose()
61
+
62
+ def compose_all(stream, Loader=Loader):
63
+ """
64
+ Parse all YAML documents in a stream
65
+ and produce corresponding representation trees.
66
+ """
67
+ loader = Loader(stream)
68
+ try:
69
+ while loader.check_node():
70
+ yield loader.get_node()
71
+ finally:
72
+ loader.dispose()
73
+
74
+ def load(stream, Loader):
75
+ """
76
+ Parse the first YAML document in a stream
77
+ and produce the corresponding Python object.
78
+ """
79
+ loader = Loader(stream)
80
+ try:
81
+ return loader.get_single_data()
82
+ finally:
83
+ loader.dispose()
84
+
85
+ def load_all(stream, Loader):
86
+ """
87
+ Parse all YAML documents in a stream
88
+ and produce corresponding Python objects.
89
+ """
90
+ loader = Loader(stream)
91
+ try:
92
+ while loader.check_data():
93
+ yield loader.get_data()
94
+ finally:
95
+ loader.dispose()
96
+
97
+ def full_load(stream):
98
+ """
99
+ Parse the first YAML document in a stream
100
+ and produce the corresponding Python object.
101
+
102
+ Resolve all tags except those known to be
103
+ unsafe on untrusted input.
104
+ """
105
+ return load(stream, FullLoader)
106
+
107
+ def full_load_all(stream):
108
+ """
109
+ Parse all YAML documents in a stream
110
+ and produce corresponding Python objects.
111
+
112
+ Resolve all tags except those known to be
113
+ unsafe on untrusted input.
114
+ """
115
+ return load_all(stream, FullLoader)
116
+
117
+ def safe_load(stream):
118
+ """
119
+ Parse the first YAML document in a stream
120
+ and produce the corresponding Python object.
121
+
122
+ Resolve only basic YAML tags. This is known
123
+ to be safe for untrusted input.
124
+ """
125
+ return load(stream, SafeLoader)
126
+
127
+ def safe_load_all(stream):
128
+ """
129
+ Parse all YAML documents in a stream
130
+ and produce corresponding Python objects.
131
+
132
+ Resolve only basic YAML tags. This is known
133
+ to be safe for untrusted input.
134
+ """
135
+ return load_all(stream, SafeLoader)
136
+
137
+ def unsafe_load(stream):
138
+ """
139
+ Parse the first YAML document in a stream
140
+ and produce the corresponding Python object.
141
+
142
+ Resolve all tags, even those known to be
143
+ unsafe on untrusted input.
144
+ """
145
+ return load(stream, UnsafeLoader)
146
+
147
+ def unsafe_load_all(stream):
148
+ """
149
+ Parse all YAML documents in a stream
150
+ and produce corresponding Python objects.
151
+
152
+ Resolve all tags, even those known to be
153
+ unsafe on untrusted input.
154
+ """
155
+ return load_all(stream, UnsafeLoader)
156
+
157
+ def emit(events, stream=None, Dumper=Dumper,
158
+ canonical=None, indent=None, width=None,
159
+ allow_unicode=None, line_break=None):
160
+ """
161
+ Emit YAML parsing events into a stream.
162
+ If stream is None, return the produced string instead.
163
+ """
164
+ getvalue = None
165
+ if stream is None:
166
+ stream = io.StringIO()
167
+ getvalue = stream.getvalue
168
+ dumper = Dumper(stream, canonical=canonical, indent=indent, width=width,
169
+ allow_unicode=allow_unicode, line_break=line_break)
170
+ try:
171
+ for event in events:
172
+ dumper.emit(event)
173
+ finally:
174
+ dumper.dispose()
175
+ if getvalue:
176
+ return getvalue()
177
+
178
+ def serialize_all(nodes, stream=None, Dumper=Dumper,
179
+ canonical=None, indent=None, width=None,
180
+ allow_unicode=None, line_break=None,
181
+ encoding=None, explicit_start=None, explicit_end=None,
182
+ version=None, tags=None):
183
+ """
184
+ Serialize a sequence of representation trees into a YAML stream.
185
+ If stream is None, return the produced string instead.
186
+ """
187
+ getvalue = None
188
+ if stream is None:
189
+ if encoding is None:
190
+ stream = io.StringIO()
191
+ else:
192
+ stream = io.BytesIO()
193
+ getvalue = stream.getvalue
194
+ dumper = Dumper(stream, canonical=canonical, indent=indent, width=width,
195
+ allow_unicode=allow_unicode, line_break=line_break,
196
+ encoding=encoding, version=version, tags=tags,
197
+ explicit_start=explicit_start, explicit_end=explicit_end)
198
+ try:
199
+ dumper.open()
200
+ for node in nodes:
201
+ dumper.serialize(node)
202
+ dumper.close()
203
+ finally:
204
+ dumper.dispose()
205
+ if getvalue:
206
+ return getvalue()
207
+
208
+ def serialize(node, stream=None, Dumper=Dumper, **kwds):
209
+ """
210
+ Serialize a representation tree into a YAML stream.
211
+ If stream is None, return the produced string instead.
212
+ """
213
+ return serialize_all([node], stream, Dumper=Dumper, **kwds)
214
+
215
+ def dump_all(documents, stream=None, Dumper=Dumper,
216
+ default_style=None, default_flow_style=False,
217
+ canonical=None, indent=None, width=None,
218
+ allow_unicode=None, line_break=None,
219
+ encoding=None, explicit_start=None, explicit_end=None,
220
+ version=None, tags=None, sort_keys=True):
221
+ """
222
+ Serialize a sequence of Python objects into a YAML stream.
223
+ If stream is None, return the produced string instead.
224
+ """
225
+ getvalue = None
226
+ if stream is None:
227
+ if encoding is None:
228
+ stream = io.StringIO()
229
+ else:
230
+ stream = io.BytesIO()
231
+ getvalue = stream.getvalue
232
+ dumper = Dumper(stream, default_style=default_style,
233
+ default_flow_style=default_flow_style,
234
+ canonical=canonical, indent=indent, width=width,
235
+ allow_unicode=allow_unicode, line_break=line_break,
236
+ encoding=encoding, version=version, tags=tags,
237
+ explicit_start=explicit_start, explicit_end=explicit_end, sort_keys=sort_keys)
238
+ try:
239
+ dumper.open()
240
+ for data in documents:
241
+ dumper.represent(data)
242
+ dumper.close()
243
+ finally:
244
+ dumper.dispose()
245
+ if getvalue:
246
+ return getvalue()
247
+
248
+ def dump(data, stream=None, Dumper=Dumper, **kwds):
249
+ """
250
+ Serialize a Python object into a YAML stream.
251
+ If stream is None, return the produced string instead.
252
+ """
253
+ return dump_all([data], stream, Dumper=Dumper, **kwds)
254
+
255
+ def safe_dump_all(documents, stream=None, **kwds):
256
+ """
257
+ Serialize a sequence of Python objects into a YAML stream.
258
+ Produce only basic YAML tags.
259
+ If stream is None, return the produced string instead.
260
+ """
261
+ return dump_all(documents, stream, Dumper=SafeDumper, **kwds)
262
+
263
+ def safe_dump(data, stream=None, **kwds):
264
+ """
265
+ Serialize a Python object into a YAML stream.
266
+ Produce only basic YAML tags.
267
+ If stream is None, return the produced string instead.
268
+ """
269
+ return dump_all([data], stream, Dumper=SafeDumper, **kwds)
270
+
271
+ def add_implicit_resolver(tag, regexp, first=None,
272
+ Loader=None, Dumper=Dumper):
273
+ """
274
+ Add an implicit scalar detector.
275
+ If an implicit scalar value matches the given regexp,
276
+ the corresponding tag is assigned to the scalar.
277
+ first is a sequence of possible initial characters or None.
278
+ """
279
+ if Loader is None:
280
+ loader.Loader.add_implicit_resolver(tag, regexp, first)
281
+ loader.FullLoader.add_implicit_resolver(tag, regexp, first)
282
+ loader.UnsafeLoader.add_implicit_resolver(tag, regexp, first)
283
+ else:
284
+ Loader.add_implicit_resolver(tag, regexp, first)
285
+ Dumper.add_implicit_resolver(tag, regexp, first)
286
+
287
+ def add_path_resolver(tag, path, kind=None, Loader=None, Dumper=Dumper):
288
+ """
289
+ Add a path based resolver for the given tag.
290
+ A path is a list of keys that forms a path
291
+ to a node in the representation tree.
292
+ Keys can be string values, integers, or None.
293
+ """
294
+ if Loader is None:
295
+ loader.Loader.add_path_resolver(tag, path, kind)
296
+ loader.FullLoader.add_path_resolver(tag, path, kind)
297
+ loader.UnsafeLoader.add_path_resolver(tag, path, kind)
298
+ else:
299
+ Loader.add_path_resolver(tag, path, kind)
300
+ Dumper.add_path_resolver(tag, path, kind)
301
+
302
+ def add_constructor(tag, constructor, Loader=None):
303
+ """
304
+ Add a constructor for the given tag.
305
+ Constructor is a function that accepts a Loader instance
306
+ and a node object and produces the corresponding Python object.
307
+ """
308
+ if Loader is None:
309
+ loader.Loader.add_constructor(tag, constructor)
310
+ loader.FullLoader.add_constructor(tag, constructor)
311
+ loader.UnsafeLoader.add_constructor(tag, constructor)
312
+ else:
313
+ Loader.add_constructor(tag, constructor)
314
+
315
+ def add_multi_constructor(tag_prefix, multi_constructor, Loader=None):
316
+ """
317
+ Add a multi-constructor for the given tag prefix.
318
+ Multi-constructor is called for a node if its tag starts with tag_prefix.
319
+ Multi-constructor accepts a Loader instance, a tag suffix,
320
+ and a node object and produces the corresponding Python object.
321
+ """
322
+ if Loader is None:
323
+ loader.Loader.add_multi_constructor(tag_prefix, multi_constructor)
324
+ loader.FullLoader.add_multi_constructor(tag_prefix, multi_constructor)
325
+ loader.UnsafeLoader.add_multi_constructor(tag_prefix, multi_constructor)
326
+ else:
327
+ Loader.add_multi_constructor(tag_prefix, multi_constructor)
328
+
329
+ def add_representer(data_type, representer, Dumper=Dumper):
330
+ """
331
+ Add a representer for the given type.
332
+ Representer is a function accepting a Dumper instance
333
+ and an instance of the given data type
334
+ and producing the corresponding representation node.
335
+ """
336
+ Dumper.add_representer(data_type, representer)
337
+
338
+ def add_multi_representer(data_type, multi_representer, Dumper=Dumper):
339
+ """
340
+ Add a representer for the given type.
341
+ Multi-representer is a function accepting a Dumper instance
342
+ and an instance of the given data type or subtype
343
+ and producing the corresponding representation node.
344
+ """
345
+ Dumper.add_multi_representer(data_type, multi_representer)
346
+
347
+ class YAMLObjectMetaclass(type):
348
+ """
349
+ The metaclass for YAMLObject.
350
+ """
351
+ def __init__(cls, name, bases, kwds):
352
+ super(YAMLObjectMetaclass, cls).__init__(name, bases, kwds)
353
+ if 'yaml_tag' in kwds and kwds['yaml_tag'] is not None:
354
+ if isinstance(cls.yaml_loader, list):
355
+ for loader in cls.yaml_loader:
356
+ loader.add_constructor(cls.yaml_tag, cls.from_yaml)
357
+ else:
358
+ cls.yaml_loader.add_constructor(cls.yaml_tag, cls.from_yaml)
359
+
360
+ cls.yaml_dumper.add_representer(cls, cls.to_yaml)
361
+
362
+ class YAMLObject(metaclass=YAMLObjectMetaclass):
363
+ """
364
+ An object that can dump itself to a YAML stream
365
+ and load itself from a YAML stream.
366
+ """
367
+
368
+ __slots__ = () # no direct instantiation, so allow immutable subclasses
369
+
370
+ yaml_loader = [Loader, FullLoader, UnsafeLoader]
371
+ yaml_dumper = Dumper
372
+
373
+ yaml_tag = None
374
+ yaml_flow_style = None
375
+
376
+ @classmethod
377
+ def from_yaml(cls, loader, node):
378
+ """
379
+ Convert a representation node to a Python object.
380
+ """
381
+ return loader.construct_yaml_object(node, cls)
382
+
383
+ @classmethod
384
+ def to_yaml(cls, dumper, data):
385
+ """
386
+ Convert a Python object to a representation node.
387
+ """
388
+ return dumper.represent_yaml_object(cls.yaml_tag, data, cls,
389
+ flow_style=cls.yaml_flow_style)
390
+
venv/lib/python3.10/site-packages/yaml/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (10.7 kB).
 
venv/lib/python3.10/site-packages/yaml/__pycache__/composer.cpython-310.pyc ADDED
Binary file (3.62 kB).
 
venv/lib/python3.10/site-packages/yaml/__pycache__/constructor.cpython-310.pyc ADDED
Binary file (20.2 kB).