applied-ai-018 committed on
Commit e48c3ab · verified · 1 Parent(s): a7f6a73

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. ckpts/universal/global_step20/zero/26.input_layernorm.weight/exp_avg.pt +3 -0
  2. ckpts/universal/global_step20/zero/26.input_layernorm.weight/exp_avg_sq.pt +3 -0
  3. ckpts/universal/global_step20/zero/26.input_layernorm.weight/fp32.pt +3 -0
  4. ckpts/universal/global_step20/zero/3.mlp.dense_h_to_4h_swiglu.weight/fp32.pt +3 -0
  5. ckpts/universal/global_step20/zero/4.mlp.dense_4h_to_h.weight/exp_avg.pt +3 -0
  6. lm-evaluation-harness/wandb/run-20240514_114312-um1uwghq/files/config.yaml +32 -0
  7. lm-evaluation-harness/wandb/run-20240514_114312-um1uwghq/logs/debug-internal.log +19 -0
  8. lm-evaluation-harness/wandb/run-20240514_114312-um1uwghq/logs/debug.log +20 -0
  9. lm-evaluation-harness/wandb/run-20240514_114312-um1uwghq/run-um1uwghq.wandb +0 -0
  10. lm-evaluation-harness/wandb/run-20240514_163703-t88cgth5/files/config.yaml +43 -0
  11. lm-evaluation-harness/wandb/run-20240514_163703-t88cgth5/files/output.log +33 -0
  12. lm-evaluation-harness/wandb/run-20240514_163703-t88cgth5/files/requirements.txt +163 -0
  13. lm-evaluation-harness/wandb/run-20240514_163703-t88cgth5/files/wandb-metadata.json +810 -0
  14. lm-evaluation-harness/wandb/run-20240514_163703-t88cgth5/files/wandb-summary.json +1 -0
  15. lm-evaluation-harness/wandb/run-20240514_163703-t88cgth5/logs/debug-internal.log +182 -0
  16. lm-evaluation-harness/wandb/run-20240514_163703-t88cgth5/logs/debug.log +29 -0
  17. lm-evaluation-harness/wandb/run-20240514_163703-t88cgth5/run-t88cgth5.wandb +0 -0
  18. lm-evaluation-harness/wandb/run-20240523_051727-xsim9azn/files/config.yaml +43 -0
  19. lm-evaluation-harness/wandb/run-20240523_051727-xsim9azn/files/output.log +34 -0
  20. lm-evaluation-harness/wandb/run-20240523_051727-xsim9azn/files/requirements.txt +155 -0
  21. lm-evaluation-harness/wandb/run-20240523_051727-xsim9azn/files/wandb-metadata.json +850 -0
  22. lm-evaluation-harness/wandb/run-20240523_051727-xsim9azn/files/wandb-summary.json +1 -0
  23. lm-evaluation-harness/wandb/run-20240523_051727-xsim9azn/logs/debug-internal.log +182 -0
  24. lm-evaluation-harness/wandb/run-20240523_051727-xsim9azn/logs/debug.log +29 -0
  25. lm-evaluation-harness/wandb/run-20240523_051727-xsim9azn/run-xsim9azn.wandb +0 -0
  26. lm-evaluation-harness/wandb/run-20240523_131005-q495m75j/files/config.yaml +43 -0
  27. lm-evaluation-harness/wandb/run-20240523_131005-q495m75j/files/output.log +34 -0
  28. lm-evaluation-harness/wandb/run-20240523_131005-q495m75j/files/requirements.txt +155 -0
  29. lm-evaluation-harness/wandb/run-20240523_131005-q495m75j/files/wandb-metadata.json +850 -0
  30. lm-evaluation-harness/wandb/run-20240523_131005-q495m75j/files/wandb-summary.json +1 -0
  31. lm-evaluation-harness/wandb/run-20240523_131005-q495m75j/logs/debug-internal.log +183 -0
  32. lm-evaluation-harness/wandb/run-20240523_131005-q495m75j/logs/debug.log +29 -0
  33. lm-evaluation-harness/wandb/run-20240523_131005-q495m75j/run-q495m75j.wandb +0 -0
  34. lm-evaluation-harness/wandb/run-20240530_070447-fi4sos5j/files/requirements.txt +154 -0
  35. lm-evaluation-harness/wandb/run-20240530_070447-fi4sos5j/files/wandb-summary.json +1 -0
  36. venv/lib/python3.10/site-packages/transformers/models/bertweet/__init__.py +29 -0
  37. venv/lib/python3.10/site-packages/transformers/models/bertweet/__pycache__/__init__.cpython-310.pyc +0 -0
  38. venv/lib/python3.10/site-packages/transformers/models/bertweet/__pycache__/tokenization_bertweet.cpython-310.pyc +0 -0
  39. venv/lib/python3.10/site-packages/transformers/models/bertweet/tokenization_bertweet.py +767 -0
  40. venv/lib/python3.10/site-packages/transformers/models/ernie_m/__init__.py +82 -0
  41. venv/lib/python3.10/site-packages/transformers/models/ernie_m/__pycache__/configuration_ernie_m.cpython-310.pyc +0 -0
  42. venv/lib/python3.10/site-packages/transformers/models/ernie_m/configuration_ernie_m.py +112 -0
  43. venv/lib/python3.10/site-packages/transformers/models/ernie_m/modeling_ernie_m.py +1058 -0
  44. venv/lib/python3.10/site-packages/transformers/models/ernie_m/tokenization_ernie_m.py +405 -0
  45. venv/lib/python3.10/site-packages/transformers/models/musicgen/__init__.py +67 -0
  46. venv/lib/python3.10/site-packages/transformers/models/musicgen/__pycache__/__init__.cpython-310.pyc +0 -0
  47. venv/lib/python3.10/site-packages/transformers/models/musicgen/__pycache__/configuration_musicgen.cpython-310.pyc +0 -0
  48. venv/lib/python3.10/site-packages/transformers/models/musicgen/__pycache__/convert_musicgen_transformers.cpython-310.pyc +0 -0
  49. venv/lib/python3.10/site-packages/transformers/models/musicgen/__pycache__/modeling_musicgen.cpython-310.pyc +0 -0
  50. venv/lib/python3.10/site-packages/transformers/models/musicgen/__pycache__/processing_musicgen.cpython-310.pyc +0 -0
ckpts/universal/global_step20/zero/26.input_layernorm.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0e0f728bb45c7312584c2c755fc7bebf72d2561d2c437657b21c6bb5b7884913
+ size 9372
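These three-line files are Git LFS pointers rather than the tensors themselves: `version` names the pointer spec, `oid` is the SHA-256 of the actual payload, and `size` is its byte count. A minimal sketch of how a downloaded payload could be checked against such a pointer (the values come from the pointer above; the helper itself is illustrative, not part of this repo):

```python
import hashlib

def verify_lfs_payload(payload_path: str, expected_oid: str, expected_size: int) -> bool:
    """Hash the downloaded file and compare it to the oid/size in the LFS pointer."""
    h = hashlib.sha256()
    n = 0
    with open(payload_path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # stream in 1 MiB chunks
            h.update(chunk)
            n += len(chunk)
    return n == expected_size and h.hexdigest() == expected_oid

# Values from the pointer above; the local path assumes the payload was fetched.
print(verify_lfs_payload(
    "ckpts/universal/global_step20/zero/26.input_layernorm.weight/exp_avg.pt",
    "0e0f728bb45c7312584c2c755fc7bebf72d2561d2c437657b21c6bb5b7884913",
    9372,
))
```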
ckpts/universal/global_step20/zero/26.input_layernorm.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a128296ed5c71e3a8f3f6c0a69df2c4f087906c3c039e972fe8fcd3e568a7169
+ size 9387
ckpts/universal/global_step20/zero/26.input_layernorm.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a69e8ffd317dfc5464ae65ba7534973db1814bc23c6024bde68a545703d06210
+ size 9293
ckpts/universal/global_step20/zero/3.mlp.dense_h_to_4h_swiglu.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c66cf3e2c1940a5984771c4856d9d20598609758ad8010ccd3a765f37d2ef32c
+ size 33555533
ckpts/universal/global_step20/zero/4.mlp.dense_4h_to_h.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0a9ff7aacb96dffcf4d5c5635252f1608e1bd78c50486819bf4467971131e8b3
+ size 33555612
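The `size` fields give a rough sanity check on what these DeepSpeed ZeRO shards contain. The layernorm pointers (~9.3 KB) are consistent with small 1-D fp32 vectors plus `torch.save` pickling overhead, and the MLP pointer's 33,555,533 bytes is just over 32 MiB, which matches a 4096 x 2048 fp32 matrix plus metadata. The shape here is an assumption inferred from the size, not read from the files; a small sketch of the arithmetic:

```python
# Hedged sanity check: relate an LFS pointer's byte size to a candidate
# fp32 tensor shape. The 4096 x 2048 shape is inferred, not authoritative.
def fp32_tensor_bytes(*shape: int) -> int:
    n = 1
    for d in shape:
        n *= d
    return n * 4  # 4 bytes per float32 element

raw = fp32_tensor_bytes(4096, 2048)  # 33_554_432 bytes
pointer_size = 33_555_533            # from the fp32.pt pointer above
print(pointer_size - raw)            # ~1.1 KB of torch.save/pickle overhead
```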
lm-evaluation-harness/wandb/run-20240514_114312-um1uwghq/files/config.yaml ADDED
@@ -0,0 +1,32 @@
+ wandb_version: 1
+
+ _wandb:
+   desc: null
+   value:
+     python_version: 3.10.12
+     cli_version: 0.17.0
+     framework: huggingface
+     huggingface_version: 4.40.2
+     is_jupyter_run: false
+     is_kaggle_kernel: false
+     start_time: 1715686992
+     t:
+       1:
+       - 1
+       - 5
+       - 11
+       - 49
+       - 51
+       - 53
+       - 55
+       - 71
+       - 98
+       - 100
+       3:
+       - 23
+       4: 3.10.12
+       5: 0.17.0
+       6: 4.40.2
+       8:
+       - 5
+       13: linux-x86_64
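This `config.yaml` is wandb's run bookkeeping rather than experiment hyperparameters: the `_wandb.value` block records client versions, and `start_time` is a Unix timestamp. Decoding it recovers the timestamp embedded in the run directory name, as a quick sketch shows (the path mirrors the diff above; PyYAML is listed in the requirements.txt later in this commit):

```python
from datetime import datetime, timezone
import yaml

with open("lm-evaluation-harness/wandb/run-20240514_114312-um1uwghq/files/config.yaml") as f:
    cfg = yaml.safe_load(f)

start = cfg["_wandb"]["value"]["start_time"]           # 1715686992
print(datetime.fromtimestamp(start, tz=timezone.utc))  # 2024-05-14 11:43:12+00:00
```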
lm-evaluation-harness/wandb/run-20240514_114312-um1uwghq/logs/debug-internal.log ADDED
@@ -0,0 +1,19 @@
+ 2024-05-14 11:43:12,780 INFO StreamThr :80497 [internal.py:wandb_internal():85] W&B internal server running at pid: 80497, started at: 2024-05-14 11:43:12.780007
+ 2024-05-14 11:43:12,782 DEBUG HandlerThread:80497 [handler.py:handle_request():158] handle_request: status
+ 2024-05-14 11:43:12,784 INFO WriterThread:80497 [datastore.py:open_for_write():87] open: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_114312-um1uwghq/run-um1uwghq.wandb
+ 2024-05-14 11:43:12,784 DEBUG SenderThread:80497 [sender.py:send():378] send: header
+ 2024-05-14 11:43:12,799 DEBUG SenderThread:80497 [sender.py:send():378] send: run
+ 2024-05-14 11:43:13,077 INFO SenderThread:80497 [dir_watcher.py:__init__():211] watching files in: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_114312-um1uwghq/files
+ 2024-05-14 11:43:13,077 INFO SenderThread:80497 [sender.py:_start_run_threads():1123] run started: um1uwghq with start time 1715686992.779485
+ 2024-05-14 11:43:13,784 WARNING StreamThr :80497 [internal.py:is_dead():413] Internal process exiting, parent pid 79480 disappeared
+ 2024-05-14 11:43:13,784 ERROR StreamThr :80497 [internal.py:wandb_internal():151] Internal process shutdown.
+ 2024-05-14 11:43:13,799 INFO HandlerThread:80497 [handler.py:finish():882] shutting down handler
+ 2024-05-14 11:43:13,799 INFO WriterThread:80497 [datastore.py:close():296] close: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_114312-um1uwghq/run-um1uwghq.wandb
+ 2024-05-14 11:43:14,077 INFO SenderThread:80497 [sender.py:finish():1545] shutting down sender
+ 2024-05-14 11:43:14,077 INFO SenderThread:80497 [dir_watcher.py:finish():358] shutting down directory watcher
+ 2024-05-14 11:43:15,077 INFO SenderThread:80497 [dir_watcher.py:finish():388] scan: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_114312-um1uwghq/files
+ 2024-05-14 11:43:15,078 INFO SenderThread:80497 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_114312-um1uwghq/files/config.yaml config.yaml
+ 2024-05-14 11:43:15,078 INFO SenderThread:80497 [file_pusher.py:finish():169] shutting down file pusher
+ 2024-05-14 11:43:15,078 INFO SenderThread:80497 [file_pusher.py:join():175] waiting for file pusher
+ 2024-05-14 11:43:15,521 INFO wandb-upload_0:80497 [upload_job.py:push():130] Uploaded file /data/cronscript/lm-evaluation-harness/wandb/run-20240514_114312-um1uwghq/files/config.yaml
+ 2024-05-14 11:43:15,784 INFO MainThread:80497 [internal.py:handle_exit():75] Internal process exited
lm-evaluation-harness/wandb/run-20240514_114312-um1uwghq/logs/debug.log ADDED
@@ -0,0 +1,20 @@
+ 2024-05-14 11:43:12,776 INFO MainThread:79480 [wandb_setup.py:_flush():76] Current SDK version is 0.17.0
+ 2024-05-14 11:43:12,776 INFO MainThread:79480 [wandb_setup.py:_flush():76] Configure stats pid to 79480
+ 2024-05-14 11:43:12,776 INFO MainThread:79480 [wandb_setup.py:_flush():76] Loading settings from /root/.config/wandb/settings
+ 2024-05-14 11:43:12,776 INFO MainThread:79480 [wandb_setup.py:_flush():76] Loading settings from /data/cronscript/lm-evaluation-harness/wandb/settings
+ 2024-05-14 11:43:12,776 INFO MainThread:79480 [wandb_setup.py:_flush():76] Loading settings from environment variables: {}
+ 2024-05-14 11:43:12,776 INFO MainThread:79480 [wandb_setup.py:_flush():76] Applying setup settings: {'_disable_service': False}
+ 2024-05-14 11:43:12,776 WARNING MainThread:79480 [wandb_setup.py:_flush():76] Could not find program at -m lm_eval.__main__
+ 2024-05-14 11:43:12,776 INFO MainThread:79480 [wandb_setup.py:_flush():76] Inferring run settings from compute environment: {'program_relpath': None, 'program': '-m lm_eval.__main__'}
+ 2024-05-14 11:43:12,776 INFO MainThread:79480 [wandb_setup.py:_flush():76] Applying login settings: {}
+ 2024-05-14 11:43:12,776 INFO MainThread:79480 [wandb_init.py:_log_setup():520] Logging user logs to /data/cronscript/lm-evaluation-harness/wandb/run-20240514_114312-um1uwghq/logs/debug.log
+ 2024-05-14 11:43:12,777 INFO MainThread:79480 [wandb_init.py:_log_setup():521] Logging internal logs to /data/cronscript/lm-evaluation-harness/wandb/run-20240514_114312-um1uwghq/logs/debug-internal.log
+ 2024-05-14 11:43:12,777 INFO MainThread:79480 [wandb_init.py:init():560] calling init triggers
+ 2024-05-14 11:43:12,777 INFO MainThread:79480 [wandb_init.py:init():567] wandb.init called with sweep_config: {}
+ config: {}
+ 2024-05-14 11:43:12,777 INFO MainThread:79480 [wandb_init.py:init():610] starting backend
+ 2024-05-14 11:43:12,777 INFO MainThread:79480 [wandb_init.py:init():614] setting up manager
+ 2024-05-14 11:43:12,778 INFO MainThread:79480 [backend.py:_multiprocessing_setup():105] multiprocessing start_methods=fork,spawn,forkserver, using: spawn
+ 2024-05-14 11:43:12,779 INFO MainThread:79480 [wandb_init.py:init():622] backend started and connected
+ 2024-05-14 11:43:12,782 INFO MainThread:79480 [wandb_init.py:init():711] updated telemetry
+ 2024-05-14 11:43:12,798 INFO MainThread:79480 [wandb_init.py:init():744] communicating run to backend with 90.0 second timeout
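This debug.log shows the client side of run startup: settings are loaded, the backend process is spawned, and the run is communicated with a 90-second timeout. The "Could not find program" warning is benign here; it only means the harness was launched as `python -m lm_eval`, so there is no script path to record. Judging by the run metadata later in this commit (`--wandb_args project=bharatgpt,group=trial_expt`), the harness appears to forward those comma-separated key=value pairs to `wandb.init`; a minimal sketch of that assumed mapping:

```python
import wandb

# Assumed equivalent of `--wandb_args project=bharatgpt,group=trial_expt`:
# split the key=value pairs and forward them to wandb.init.
wandb_args = "project=bharatgpt,group=trial_expt"
kwargs = dict(pair.split("=", 1) for pair in wandb_args.split(","))
run = wandb.init(**kwargs)  # wandb.init(project="bharatgpt", group="trial_expt")
run.finish()
```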
lm-evaluation-harness/wandb/run-20240514_114312-um1uwghq/run-um1uwghq.wandb ADDED
Binary file (365 Bytes)
lm-evaluation-harness/wandb/run-20240514_163703-t88cgth5/files/config.yaml ADDED
@@ -0,0 +1,43 @@
+ wandb_version: 1
+
+ _wandb:
+   desc: null
+   value:
+     python_version: 3.10.12
+     cli_version: 0.17.0
+     framework: huggingface
+     huggingface_version: 4.40.2
+     is_jupyter_run: false
+     is_kaggle_kernel: false
+     start_time: 1715704623
+     t:
+       1:
+       - 1
+       - 5
+       - 11
+       - 49
+       - 51
+       - 53
+       - 55
+       - 71
+       - 98
+       - 100
+       2:
+       - 1
+       - 5
+       - 11
+       - 49
+       - 51
+       - 53
+       - 55
+       - 71
+       - 98
+       - 100
+       3:
+       - 23
+       4: 3.10.12
+       5: 0.17.0
+       6: 4.40.2
+       8:
+       - 5
+       13: linux-x86_64
lm-evaluation-harness/wandb/run-20240514_163703-t88cgth5/files/output.log ADDED
@@ -0,0 +1,33 @@
+
+ 2024-05-14:16:37:04,204 INFO [__main__.py:251] Verbosity set to INFO
+ 2024-05-14:16:37:08,795 INFO [__main__.py:335] Selected Tasks: ['indiccopa-hi']
+ 2024-05-14:16:37:08,797 INFO [evaluator.py:131] Setting random seed to 0 | Setting numpy seed to 1234 | Setting torch manual seed to 1234
+ 2024-05-14:16:37:08,797 INFO [evaluator.py:177] Initializing hf model, with arguments: {'pretrained': '/data/cronscript/ckpts//hf_ckpt//global_step20'}
+ Traceback (most recent call last):
+   File "/usr/lib/python3.10/runpy.py", line 196, in _run_module_as_main
+     return _run_code(code, main_globals, None,
+   File "/usr/lib/python3.10/runpy.py", line 86, in _run_code
+     exec(code, run_globals)
+   File "/data/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 417, in <module>
+     cli_evaluate()
+   File "/data/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 341, in cli_evaluate
+     results = evaluator.simple_evaluate(
+   File "/data/cronscript/lm-evaluation-harness/lm_eval/utils.py", line 288, in _wrapper
+     return fn(*args, **kwargs)
+   File "/data/cronscript/lm-evaluation-harness/lm_eval/evaluator.py", line 180, in simple_evaluate
+     lm = lm_eval.api.registry.get_model(model).create_from_arg_string(
+   File "/data/cronscript/lm-evaluation-harness/lm_eval/api/model.py", line 134, in create_from_arg_string
+     return cls(**args, **args2)
+   File "/data/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 190, in __init__
+     self._get_config(
+   File "/data/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 471, in _get_config
+     self._config = transformers.AutoConfig.from_pretrained(
+   File "/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py", line 928, in from_pretrained
+     config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs)
+   File "/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 631, in get_config_dict
+     config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs)
+   File "/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 686, in _get_config_dict
+     resolved_config_file = cached_file(
+   File "/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py", line 369, in cached_file
+     raise EnvironmentError(
+ OSError: /data/cronscript/ckpts//hf_ckpt//global_step20 does not appear to have a file named config.json. Checkout 'https://huggingface.co//data/cronscript/ckpts//hf_ckpt//global_step20/tree/main' for available files.
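The failure is mundane: `AutoConfig.from_pretrained` treats the argument as a local directory (the doubled slashes are harmless to the filesystem), finds no `config.json` under `global_step20`, and raises. The checkpoints in this commit appear to be DeepSpeed ZeRO shards rather than an exported Hugging Face model, so the directory genuinely lacks a config. A hedged sketch of a pre-flight check that would surface this before lm-eval starts (the path is the one from the log; the check itself is hypothetical):

```python
import os

# Hypothetical pre-flight check for the path passed via --model_args.
pretrained = "/data/cronscript/ckpts//hf_ckpt//global_step20"
path = os.path.normpath(pretrained)  # collapses the doubled slashes

config_json = os.path.join(path, "config.json")
if not os.path.isfile(config_json):
    raise FileNotFoundError(
        f"{path} has no config.json; export the DeepSpeed checkpoint "
        "to Hugging Face format before pointing lm-eval at it."
    )
```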
lm-evaluation-harness/wandb/run-20240514_163703-t88cgth5/files/requirements.txt ADDED
@@ -0,0 +1,163 @@
+ DataProperty==1.0.1
+ GitPython==3.1.43
+ Jinja2==3.1.3
+ Markdown==3.6
+ MarkupSafe==2.1.5
+ Pillow-SIMD==7.0.0.post3
+ PyYAML==6.0
+ Werkzeug==3.0.2
+ absl-py==2.1.0
+ accelerate==0.30.1
+ aiohttp==3.9.4
+ aiosignal==1.3.1
+ antlr4-python3-runtime==4.9.3
+ anyio==4.3.0
+ async-timeout==4.0.3
+ attrs==23.2.0
+ av==9.2.0
+ cachetools==5.3.3
+ certifi==2024.2.2
+ cffi==1.15.1
+ cfgv==3.4.0
+ chardet==5.2.0
+ charset-normalizer==3.3.2
+ click==8.1.7
+ cmake==3.29.2
+ colorama==0.4.6
+ datasets==2.19.1
+ deepspeed==0.12.4+hpu.synapse.v1.15.1
+ dill==0.3.8
+ distlib==0.3.8
+ distro==1.9.0
+ docker-pycreds==0.4.0
+ einops==0.8.0
+ evaluate==0.4.2
+ exceptiongroup==1.2.0
+ expecttest==0.2.1
+ filelock==3.13.4
+ frozenlist==1.4.1
+ fsspec==2024.3.1
+ gitdb==4.0.11
+ google-auth-oauthlib==0.4.6
+ google-auth==2.29.0
+ grpcio==1.62.1
+ h11==0.14.0
+ habana-media-loader==1.15.1.15
+ habana-pyhlml==1.15.1.15
+ habana-torch-dataloader==1.15.1.15
+ habana-torch-plugin==1.15.1.15
+ habana_gpu_migration==1.15.1.15
+ habana_quantization_toolkit==1.15.1.15
+ hjson==3.1.0
+ httpcore==1.0.5
+ httpx==0.27.0
+ huggingface-hub==0.23.0
+ identify==2.5.35
+ idna==3.7
+ importlib_resources==6.4.0
+ iniconfig==2.0.0
+ joblib==1.4.2
+ jsonlines==4.0.0
+ lightning-habana==1.4.0
+ lightning-utilities==0.11.2
+ lightning==2.2.0.post0
+ lm_eval==0.3.0
+ lm_eval==0.4.2
+ lm_eval==0.4.2
+ lm_eval==0.4.2
+ mbstrdecoder==1.1.3
+ more-itertools==10.2.0
+ mpi4py==3.1.4
+ mpmath==1.3.0
+ multidict==6.0.5
+ multiprocess==0.70.16
+ networkx==3.3
+ ninja==1.11.1.1
+ nltk==3.8.1
+ nodeenv==1.8.0
+ numexpr==2.10.0
+ numpy==1.23.5
+ oauthlib==3.2.2
+ omegaconf==2.3.0
+ openai==1.29.0
+ packaging==24.0
+ pandas==2.0.1
+ pathspec==0.12.1
+ pathvalidate==3.2.0
+ peft==0.10.0
+ perfetto==0.7.0
+ pip==22.0.2
+ pip==23.3.1
+ platformdirs==4.2.0
+ pluggy==1.4.0
+ portalocker==2.8.2
+ pre-commit==3.3.3
+ protobuf==3.20.3
+ psutil==5.9.8
+ py-cpuinfo==9.0.0
+ pyarrow-hotfix==0.6
+ pyarrow==16.0.0
+ pyasn1==0.6.0
+ pyasn1_modules==0.4.0
+ pybind11==2.10.4
+ pycountry==23.12.11
+ pycparser==2.22
+ pydantic==1.10.13
+ pynvml==8.0.4
+ pytablewriter==1.2.0
+ pytest==8.1.1
+ python-dateutil==2.9.0.post0
+ pytorch-lightning==2.2.2
+ pytz==2024.1
+ regex==2023.5.5
+ requests-oauthlib==2.0.0
+ requests==2.31.0
+ rouge_score==0.1.2
+ rsa==4.9
+ sacrebleu==1.5.0
+ safetensors==0.4.3
+ scikit-learn==1.4.2
+ scipy==1.13.0
+ sentencepiece==0.2.0
+ sentry-sdk==2.1.1
+ setproctitle==1.3.3
+ setuptools==59.6.0
+ setuptools==69.5.1
+ six==1.16.0
+ smmap==5.0.1
+ sniffio==1.3.1
+ sqlitedict==2.1.0
+ symengine==0.11.0
+ sympy==1.12
+ tabledata==1.3.3
+ tcolorpy==0.1.6
+ tdqm==0.0.1
+ tensorboard-data-server==0.6.1
+ tensorboard-plugin-wit==1.8.1
+ tensorboard==2.11.2
+ threadpoolctl==3.5.0
+ tokenizers==0.19.1
+ tomli==2.0.1
+ torch==2.2.0a0+git8964477
+ torch_tb_profiler==0.4.0
+ torchaudio==2.2.0+08901ad
+ torchdata==0.7.1+5e6f7b7
+ torchmetrics==1.3.2
+ torchtext==0.17.0+400da5c
+ torchvision==0.17.0+b2383d4
+ tqdm-multiprocess==0.0.11
+ tqdm==4.66.2
+ transformers==4.40.2
+ typepy==1.3.2
+ typing_extensions==4.11.0
+ tzdata==2024.1
+ urllib3==1.26.18
+ virtualenv==20.25.1
+ wandb==0.17.0
+ wheel==0.37.1
+ wheel==0.43.0
+ word2number==1.1
+ xxhash==3.4.1
+ yamllint==1.35.1
+ yarl==1.9.4
+ zstandard==0.22.0
lm-evaluation-harness/wandb/run-20240514_163703-t88cgth5/files/wandb-metadata.json ADDED
@@ -0,0 +1,810 @@
+ {
+   "os": "Linux-5.15.0-92-generic-x86_64-with-glibc2.35",
+   "python": "3.10.12",
+   "heartbeatAt": "2024-05-14T16:37:04.072219",
+   "startedAt": "2024-05-14T16:37:03.637970",
+   "docker": null,
+   "cuda": null,
+   "args": ["--model", "hf", "--model_args", "pretrained=/data/cronscript/ckpts//hf_ckpt//global_step20", "--tasks", "indiccopa-hi", "--batch_size", "auto", "--wandb_args", "project=bharatgpt,group=trial_expt"],
+   "state": "running",
+   "program": "-m lm_eval.__main__",
+   "codePathLocal": null,
+   "git": {"remote": "https://github.com/EleutherAI/lm-evaluation-harness", "commit": null},
+   "email": null,
+   "root": "/data/cronscript/lm-evaluation-harness",
+   "host": "vizzhy-150-3",
+   "username": "root",
+   "executable": "/usr/bin/python3",
+   "cpu_count": 76,
+   "cpu_count_logical": 152,
+   "cpu_freq": {"current": 3389.1128618421053, "min": 800.0, "max": 3400.0},
+   "cpu_freq_per_core": [
+     {"current": 3300.0, "min": 800.0, "max": 3400.0},
+     {"current": 3300.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3300.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3300.003, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3300.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3396.073, "min": 800.0, "max": 3400.0},
+     {"current": 3396.311, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3396.716, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3300.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3300.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0},
+     {"current": 3400.0, "min": 800.0, "max": 3400.0}
+   ],
+   "disk": {"/": {"total": 866.4415092468262, "used": 863.4235572814941}},
+   "memory": {"total": 1007.5000267028809}
+ }
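wandb-metadata.json is the probe snapshot the system monitor publishes at startup; the same dict reappears verbatim in debug-internal.log below. Two details stand out: the host reports 76 physical / 152 logical cores, and the root disk is nearly full (863.4 of 866.4 GB used), which is worth noticing on a box writing 33 MB optimizer shards. The fields look like standard psutil probes; a sketch that would reproduce the same numbers, assuming psutil is how they were gathered:

```python
import psutil

snapshot = {
    "cpu_count": psutil.cpu_count(logical=False),          # 76
    "cpu_count_logical": psutil.cpu_count(logical=True),   # 152
    "cpu_freq": psutil.cpu_freq()._asdict(),               # current/min/max MHz
    "cpu_freq_per_core": [f._asdict() for f in psutil.cpu_freq(percpu=True)],
    "disk": {"/": {
        "total": psutil.disk_usage("/").total / 1024**3,   # GB
        "used": psutil.disk_usage("/").used / 1024**3,
    }},
    "memory": {"total": psutil.virtual_memory().total / 1024**3},
}
print(snapshot["cpu_count"], snapshot["disk"]["/"])
```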
lm-evaluation-harness/wandb/run-20240514_163703-t88cgth5/files/wandb-summary.json ADDED
@@ -0,0 +1 @@
+ {"_wandb": {"runtime": 5}}
lm-evaluation-harness/wandb/run-20240514_163703-t88cgth5/logs/debug-internal.log ADDED
@@ -0,0 +1,182 @@
+ 2024-05-14 16:37:03,649 INFO StreamThr :127609 [internal.py:wandb_internal():85] W&B internal server running at pid: 127609, started at: 2024-05-14 16:37:03.648986
+ 2024-05-14 16:37:03,652 DEBUG HandlerThread:127609 [handler.py:handle_request():158] handle_request: status
+ 2024-05-14 16:37:03,653 INFO WriterThread:127609 [datastore.py:open_for_write():87] open: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163703-t88cgth5/run-t88cgth5.wandb
+ 2024-05-14 16:37:03,653 DEBUG SenderThread:127609 [sender.py:send():378] send: header
+ 2024-05-14 16:37:03,662 DEBUG SenderThread:127609 [sender.py:send():378] send: run
+ 2024-05-14 16:37:03,915 INFO SenderThread:127609 [dir_watcher.py:__init__():211] watching files in: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163703-t88cgth5/files
+ 2024-05-14 16:37:03,915 INFO SenderThread:127609 [sender.py:_start_run_threads():1123] run started: t88cgth5 with start time 1715704623.648688
+ 2024-05-14 16:37:03,922 DEBUG HandlerThread:127609 [handler.py:handle_request():158] handle_request: check_version
+ 2024-05-14 16:37:03,922 DEBUG SenderThread:127609 [sender.py:send_request():405] send_request: check_version
+ 2024-05-14 16:37:04,005 DEBUG HandlerThread:127609 [handler.py:handle_request():158] handle_request: run_start
+ 2024-05-14 16:37:04,007 DEBUG HandlerThread:127609 [system_info.py:__init__():26] System info init
+ 2024-05-14 16:37:04,007 DEBUG HandlerThread:127609 [system_info.py:__init__():41] System info init done
+ 2024-05-14 16:37:04,007 INFO HandlerThread:127609 [system_monitor.py:start():194] Starting system monitor
+ 2024-05-14 16:37:04,007 INFO SystemMonitor:127609 [system_monitor.py:_start():158] Starting system asset monitoring threads
+ 2024-05-14 16:37:04,007 INFO HandlerThread:127609 [system_monitor.py:probe():214] Collecting system info
+ 2024-05-14 16:37:04,008 INFO SystemMonitor:127609 [interfaces.py:start():188] Started cpu monitoring
+ 2024-05-14 16:37:04,008 INFO SystemMonitor:127609 [interfaces.py:start():188] Started disk monitoring
+ 2024-05-14 16:37:04,009 INFO SystemMonitor:127609 [interfaces.py:start():188] Started memory monitoring
+ 2024-05-14 16:37:04,009 INFO SystemMonitor:127609 [interfaces.py:start():188] Started network monitoring
+ 2024-05-14 16:37:04,072 DEBUG HandlerThread:127609 [system_info.py:probe():150] Probing system
+ 2024-05-14 16:37:04,080 DEBUG HandlerThread:127609 [system_info.py:_probe_git():135] Probing git
+ 2024-05-14 16:37:04,100 ERROR HandlerThread:127609 [gitlib.py:root():92] git root error: Cmd('git') failed due to: exit code(128)
+   cmdline: git rev-parse --show-toplevel
+   stderr: 'fatal: detected dubious ownership in repository at '/data/cronscript/lm-evaluation-harness'
+ To add an exception for this directory, call:
+
+ git config --global --add safe.directory /data/cronscript/lm-evaluation-harness'
+ 2024-05-14 16:37:04,100 DEBUG HandlerThread:127609 [system_info.py:_probe_git():143] Probing git done
+ 2024-05-14 16:37:04,100 DEBUG HandlerThread:127609 [system_info.py:probe():198] Probing system done
+ 2024-05-14 16:37:04,100 DEBUG HandlerThread:127609 [system_monitor.py:probe():223] {'os': 'Linux-5.15.0-92-generic-x86_64-with-glibc2.35', 'python': '3.10.12', 'heartbeatAt': '2024-05-14T16:37:04.072219', 'startedAt': '2024-05-14T16:37:03.637970', 'docker': None, 'cuda': None, 'args': ('--model', 'hf', '--model_args', 'pretrained=/data/cronscript/ckpts//hf_ckpt//global_step20', '--tasks', 'indiccopa-hi', '--batch_size', 'auto', '--wandb_args', 'project=bharatgpt,group=trial_expt'), 'state': 'running', 'program': '-m lm_eval.__main__', 'codePathLocal': None, 'git': {'remote': 'https://github.com/EleutherAI/lm-evaluation-harness', 'commit': None}, 'email': None, 'root': '/data/cronscript/lm-evaluation-harness', 'host': 'vizzhy-150-3', 'username': 'root', 'executable': '/usr/bin/python3', 'cpu_count': 76, 'cpu_count_logical': 152, 'cpu_freq': {'current': 3389.1128618421053, 'min': 800.0, 'max': 3400.0}, 'cpu_freq_per_core': [{'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.003, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3396.073, 'min': 800.0, 'max': 3400.0}, {'current': 3396.311, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3396.716, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}], 'disk': {'/': {'total': 866.4415092468262, 'used': 863.4235572814941}}, 'memory': {'total': 1007.5000267028809}}
+ 2024-05-14 16:37:04,101 INFO HandlerThread:127609 [system_monitor.py:probe():224] Finished collecting system info
+ 2024-05-14 16:37:04,101 INFO HandlerThread:127609 [system_monitor.py:probe():227] Publishing system info
+ 2024-05-14 16:37:04,102 INFO HandlerThread:127609 [system_monitor.py:probe():229] Finished publishing system info
+ 2024-05-14 16:37:04,106 DEBUG SenderThread:127609 [sender.py:send():378] send: files
+ 2024-05-14 16:37:04,106 INFO SenderThread:127609 [sender.py:_save_file():1389] saving file wandb-metadata.json with policy now
+ 2024-05-14 16:37:04,201 DEBUG HandlerThread:127609 [handler.py:handle_request():158] handle_request: python_packages
+ 2024-05-14 16:37:04,201 DEBUG SenderThread:127609 [sender.py:send_request():405] send_request: python_packages
+ 2024-05-14 16:37:04,201 DEBUG HandlerThread:127609 [handler.py:handle_request():158] handle_request: stop_status
+ 2024-05-14 16:37:04,203 DEBUG SenderThread:127609 [sender.py:send_request():405] send_request: stop_status
+ 2024-05-14 16:37:04,367 DEBUG SenderThread:127609 [sender.py:send():378] send: telemetry
+ 2024-05-14 16:37:04,612 INFO wandb-upload_0:127609 [upload_job.py:push():130] Uploaded file /tmp/tmps8ro5vrwwandb/s2uqwxfy-wandb-metadata.json
+ 2024-05-14 16:37:04,916 INFO Thread-12 :127609 [dir_watcher.py:_on_file_created():271] file/dir created: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163703-t88cgth5/files/output.log
+ 2024-05-14 16:37:04,916 INFO Thread-12 :127609 [dir_watcher.py:_on_file_created():271] file/dir created: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163703-t88cgth5/files/requirements.txt
+ 2024-05-14 16:37:04,916 INFO Thread-12 :127609 [dir_watcher.py:_on_file_created():271] file/dir created: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163703-t88cgth5/files/wandb-metadata.json
+ 2024-05-14 16:37:06,916 INFO Thread-12 :127609 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163703-t88cgth5/files/output.log
+ 2024-05-14 16:37:08,797 DEBUG HandlerThread:127609 [handler.py:handle_request():158] handle_request: status_report
+ 2024-05-14 16:37:09,895 DEBUG SenderThread:127609 [sender.py:send():378] send: exit
+ 2024-05-14 16:37:09,895 INFO SenderThread:127609 [sender.py:send_exit():585] handling exit code: 1
+ 2024-05-14 16:37:09,895 INFO SenderThread:127609 [sender.py:send_exit():587] handling runtime: 5
+ 2024-05-14 16:37:09,896 INFO SenderThread:127609 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end
+ 2024-05-14 16:37:09,896 INFO SenderThread:127609 [sender.py:send_exit():593] send defer
+ 2024-05-14 16:37:09,896 DEBUG HandlerThread:127609 [handler.py:handle_request():158] handle_request: defer
+ 2024-05-14 16:37:09,897 INFO HandlerThread:127609 [handler.py:handle_request_defer():184] handle defer: 0
+ 2024-05-14 16:37:09,897 DEBUG SenderThread:127609 [sender.py:send_request():405] send_request: defer
+ 2024-05-14 16:37:09,897 INFO SenderThread:127609 [sender.py:send_request_defer():609] handle sender defer: 0
+ 2024-05-14 16:37:09,897 INFO SenderThread:127609 [sender.py:transition_state():613] send defer: 1
+ 2024-05-14 16:37:09,897 DEBUG HandlerThread:127609 [handler.py:handle_request():158] handle_request: defer
+ 2024-05-14 16:37:09,897 INFO HandlerThread:127609 [handler.py:handle_request_defer():184] handle defer: 1
+ 2024-05-14 16:37:09,897 DEBUG SenderThread:127609 [sender.py:send_request():405] send_request: defer
+ 2024-05-14 16:37:09,897 INFO SenderThread:127609 [sender.py:send_request_defer():609] handle sender defer: 1
+ 2024-05-14 16:37:09,897 INFO SenderThread:127609 [sender.py:transition_state():613] send defer: 2
+ 2024-05-14 16:37:09,897 DEBUG HandlerThread:127609 [handler.py:handle_request():158] handle_request: defer
+ 2024-05-14 16:37:09,897 INFO HandlerThread:127609 [handler.py:handle_request_defer():184] handle defer: 2
+ 2024-05-14 16:37:09,897 INFO HandlerThread:127609 [system_monitor.py:finish():203] Stopping system monitor
+ 2024-05-14 16:37:09,897 DEBUG SystemMonitor:127609 [system_monitor.py:_start():172] Starting system metrics aggregation loop
+ 2024-05-14 16:37:09,898 INFO HandlerThread:127609 [interfaces.py:finish():200] Joined cpu monitor
+ 2024-05-14 16:37:09,898 DEBUG SystemMonitor:127609 [system_monitor.py:_start():179] Finished system metrics aggregation loop
+ 2024-05-14 16:37:09,899 INFO HandlerThread:127609 [interfaces.py:finish():200] Joined disk monitor
+ 2024-05-14 16:37:09,899 DEBUG SystemMonitor:127609 [system_monitor.py:_start():183] Publishing last batch of metrics
+ 2024-05-14 16:37:09,899 INFO HandlerThread:127609 [interfaces.py:finish():200] Joined memory monitor
+ 2024-05-14 16:37:09,900 INFO HandlerThread:127609 [interfaces.py:finish():200] Joined network monitor
+ 2024-05-14 16:37:09,900 DEBUG SenderThread:127609 [sender.py:send_request():405] send_request: defer
+ 2024-05-14 16:37:09,900 INFO SenderThread:127609 [sender.py:send_request_defer():609] handle sender defer: 2
+ 2024-05-14 16:37:09,900 INFO SenderThread:127609 [sender.py:transition_state():613] send defer: 3
+ 2024-05-14 16:37:09,901 DEBUG SenderThread:127609 [sender.py:send():378] send: stats
+ 2024-05-14 16:37:09,901 DEBUG HandlerThread:127609 [handler.py:handle_request():158] handle_request: defer
+ 2024-05-14 16:37:09,901 INFO HandlerThread:127609 [handler.py:handle_request_defer():184] handle defer: 3
+ 2024-05-14 16:37:09,901 DEBUG SenderThread:127609 [sender.py:send_request():405] send_request: defer
+ 2024-05-14 16:37:09,901 INFO SenderThread:127609 [sender.py:send_request_defer():609] handle sender defer: 3
+ 2024-05-14 16:37:09,901 INFO SenderThread:127609 [sender.py:transition_state():613] send defer: 4
+ 2024-05-14 16:37:09,901 DEBUG HandlerThread:127609 [handler.py:handle_request():158] handle_request: defer
+ 2024-05-14 16:37:09,901 INFO HandlerThread:127609 [handler.py:handle_request_defer():184] handle defer: 4
+ 2024-05-14 16:37:09,902 DEBUG SenderThread:127609 [sender.py:send_request():405] send_request: defer
+ 2024-05-14 16:37:09,902 INFO SenderThread:127609 [sender.py:send_request_defer():609] handle sender defer: 4
+ 2024-05-14 16:37:09,902 INFO SenderThread:127609 [sender.py:transition_state():613] send defer: 5
+ 2024-05-14 16:37:09,902 DEBUG HandlerThread:127609 [handler.py:handle_request():158] handle_request: defer
+ 2024-05-14 16:37:09,902 INFO HandlerThread:127609 [handler.py:handle_request_defer():184] handle defer: 5
+ 2024-05-14 16:37:09,902 DEBUG SenderThread:127609 [sender.py:send():378] send: summary
+ 2024-05-14 16:37:09,902 INFO SenderThread:127609 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end
+ 2024-05-14 16:37:09,903 DEBUG SenderThread:127609 [sender.py:send_request():405] send_request: defer
+ 2024-05-14 16:37:09,903 INFO SenderThread:127609 [sender.py:send_request_defer():609] handle sender defer: 5
+ 2024-05-14 16:37:09,903 INFO SenderThread:127609 [sender.py:transition_state():613] send defer: 6
+ 2024-05-14 16:37:09,903 DEBUG HandlerThread:127609 [handler.py:handle_request():158] handle_request: defer
+ 2024-05-14 16:37:09,903 INFO HandlerThread:127609 [handler.py:handle_request_defer():184] handle defer: 6
+ 2024-05-14 16:37:09,903 DEBUG SenderThread:127609 [sender.py:send_request():405] send_request: defer
+ 2024-05-14 16:37:09,903 INFO SenderThread:127609 [sender.py:send_request_defer():609] handle sender defer: 6
+ 2024-05-14 16:37:09,905 DEBUG HandlerThread:127609 [handler.py:handle_request():158] handle_request: status_report
+ 2024-05-14 16:37:09,918 INFO Thread-12 :127609 [dir_watcher.py:_on_file_created():271] file/dir created: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163703-t88cgth5/files/wandb-summary.json
+ 2024-05-14 16:37:09,977 INFO SenderThread:127609 [sender.py:transition_state():613] send defer: 7
+ 2024-05-14 16:37:09,978 DEBUG HandlerThread:127609 [handler.py:handle_request():158] handle_request: defer
+ 2024-05-14 16:37:09,978 INFO HandlerThread:127609 [handler.py:handle_request_defer():184] handle defer: 7
+ 2024-05-14 16:37:09,978 DEBUG SenderThread:127609 [sender.py:send_request():405] send_request: defer
+ 2024-05-14 16:37:09,978 INFO SenderThread:127609 [sender.py:send_request_defer():609] handle sender defer: 7
+ 2024-05-14 16:37:10,895 DEBUG HandlerThread:127609 [handler.py:handle_request():158] handle_request: poll_exit
+ 2024-05-14 16:37:10,919 INFO Thread-12 :127609 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163703-t88cgth5/files/output.log
+ 2024-05-14 16:37:10,919 INFO Thread-12 :127609 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163703-t88cgth5/files/config.yaml
+ 2024-05-14 16:37:12,381 INFO SenderThread:127609 [sender.py:transition_state():613] send defer: 8
+ 2024-05-14 16:37:12,381 DEBUG SenderThread:127609 [sender.py:send_request():405] send_request: poll_exit
+ 2024-05-14 16:37:12,381 DEBUG HandlerThread:127609 [handler.py:handle_request():158] handle_request: defer
+ 2024-05-14 16:37:12,381 INFO HandlerThread:127609 [handler.py:handle_request_defer():184] handle defer: 8
+ 2024-05-14 16:37:12,382 DEBUG SenderThread:127609 [sender.py:send_request():405] send_request: defer
+ 2024-05-14 16:37:12,382 INFO SenderThread:127609 [sender.py:send_request_defer():609] handle sender defer: 8
+ 2024-05-14 16:37:12,382 INFO SenderThread:127609 [job_builder.py:build():432] Attempting to build job artifact
+ 2024-05-14 16:37:12,382 INFO SenderThread:127609 [job_builder.py:_get_source_type():576] no source found
+ 2024-05-14 16:37:12,382 INFO SenderThread:127609 [sender.py:transition_state():613] send defer: 9
+ 2024-05-14 16:37:12,382 DEBUG HandlerThread:127609 [handler.py:handle_request():158] handle_request: defer
+ 2024-05-14 16:37:12,382 INFO HandlerThread:127609 [handler.py:handle_request_defer():184] handle defer: 9
+ 2024-05-14 16:37:12,382 DEBUG SenderThread:127609 [sender.py:send_request():405] send_request: defer
+ 2024-05-14 16:37:12,383 INFO SenderThread:127609 [sender.py:send_request_defer():609] handle sender defer: 9
+ 2024-05-14 16:37:12,383 INFO SenderThread:127609 [dir_watcher.py:finish():358] shutting down directory watcher
+ 2024-05-14 16:37:12,895 DEBUG HandlerThread:127609 [handler.py:handle_request():158] handle_request: poll_exit
+ 2024-05-14 16:37:12,920 INFO SenderThread:127609 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163703-t88cgth5/files/output.log
+ 2024-05-14 16:37:12,920 INFO SenderThread:127609 [dir_watcher.py:finish():388] scan: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163703-t88cgth5/files
+ 2024-05-14 16:37:12,921 INFO SenderThread:127609 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163703-t88cgth5/files/config.yaml config.yaml
+ 2024-05-14 16:37:12,921 INFO SenderThread:127609 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163703-t88cgth5/files/output.log output.log
+ 2024-05-14 16:37:12,921 INFO SenderThread:127609 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163703-t88cgth5/files/wandb-summary.json wandb-summary.json
+ 2024-05-14 16:37:12,921 INFO SenderThread:127609 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163703-t88cgth5/files/requirements.txt requirements.txt
+ 2024-05-14 16:37:12,921 INFO SenderThread:127609 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163703-t88cgth5/files/wandb-metadata.json wandb-metadata.json
+ 2024-05-14 16:37:12,924 INFO SenderThread:127609 [sender.py:transition_state():613] send defer: 10
+ 2024-05-14 16:37:12,924 DEBUG SenderThread:127609 [sender.py:send_request():405] send_request: poll_exit
+ 2024-05-14 16:37:12,926 DEBUG HandlerThread:127609 [handler.py:handle_request():158] handle_request: defer
132
+ 2024-05-14 16:37:12,926 INFO HandlerThread:127609 [handler.py:handle_request_defer():184] handle defer: 10
133
+ 2024-05-14 16:37:12,927 DEBUG SenderThread:127609 [sender.py:send_request():405] send_request: defer
134
+ 2024-05-14 16:37:12,927 INFO SenderThread:127609 [sender.py:send_request_defer():609] handle sender defer: 10
135
+ 2024-05-14 16:37:12,927 INFO SenderThread:127609 [file_pusher.py:finish():169] shutting down file pusher
136
+ 2024-05-14 16:37:13,182 INFO wandb-upload_0:127609 [upload_job.py:push():130] Uploaded file /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163703-t88cgth5/files/output.log
137
+ 2024-05-14 16:37:13,315 INFO wandb-upload_1:127609 [upload_job.py:push():130] Uploaded file /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163703-t88cgth5/files/config.yaml
138
+ 2024-05-14 16:37:13,388 INFO wandb-upload_2:127609 [upload_job.py:push():130] Uploaded file /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163703-t88cgth5/files/wandb-summary.json
139
+ 2024-05-14 16:37:13,408 INFO wandb-upload_3:127609 [upload_job.py:push():130] Uploaded file /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163703-t88cgth5/files/requirements.txt
140
+ 2024-05-14 16:37:13,608 INFO Thread-11 (_thread_body):127609 [sender.py:transition_state():613] send defer: 11
141
+ 2024-05-14 16:37:13,609 DEBUG HandlerThread:127609 [handler.py:handle_request():158] handle_request: defer
142
+ 2024-05-14 16:37:13,609 INFO HandlerThread:127609 [handler.py:handle_request_defer():184] handle defer: 11
143
+ 2024-05-14 16:37:13,609 DEBUG SenderThread:127609 [sender.py:send_request():405] send_request: defer
144
+ 2024-05-14 16:37:13,609 INFO SenderThread:127609 [sender.py:send_request_defer():609] handle sender defer: 11
145
+ 2024-05-14 16:37:13,609 INFO SenderThread:127609 [file_pusher.py:join():175] waiting for file pusher
146
+ 2024-05-14 16:37:13,609 INFO SenderThread:127609 [sender.py:transition_state():613] send defer: 12
147
+ 2024-05-14 16:37:13,609 DEBUG HandlerThread:127609 [handler.py:handle_request():158] handle_request: defer
148
+ 2024-05-14 16:37:13,609 INFO HandlerThread:127609 [handler.py:handle_request_defer():184] handle defer: 12
149
+ 2024-05-14 16:37:13,610 DEBUG SenderThread:127609 [sender.py:send_request():405] send_request: defer
150
+ 2024-05-14 16:37:13,610 INFO SenderThread:127609 [sender.py:send_request_defer():609] handle sender defer: 12
151
+ 2024-05-14 16:37:13,610 INFO SenderThread:127609 [file_stream.py:finish():601] file stream finish called
152
+ 2024-05-14 16:37:13,802 INFO SenderThread:127609 [file_stream.py:finish():605] file stream finish is done
153
+ 2024-05-14 16:37:13,803 INFO SenderThread:127609 [sender.py:transition_state():613] send defer: 13
154
+ 2024-05-14 16:37:13,803 DEBUG HandlerThread:127609 [handler.py:handle_request():158] handle_request: defer
155
+ 2024-05-14 16:37:13,803 INFO HandlerThread:127609 [handler.py:handle_request_defer():184] handle defer: 13
156
+ 2024-05-14 16:37:13,803 DEBUG SenderThread:127609 [sender.py:send_request():405] send_request: defer
157
+ 2024-05-14 16:37:13,803 INFO SenderThread:127609 [sender.py:send_request_defer():609] handle sender defer: 13
158
+ 2024-05-14 16:37:13,803 INFO SenderThread:127609 [sender.py:transition_state():613] send defer: 14
159
+ 2024-05-14 16:37:13,803 DEBUG HandlerThread:127609 [handler.py:handle_request():158] handle_request: defer
160
+ 2024-05-14 16:37:13,804 INFO HandlerThread:127609 [handler.py:handle_request_defer():184] handle defer: 14
161
+ 2024-05-14 16:37:13,804 DEBUG SenderThread:127609 [sender.py:send():378] send: final
162
+ 2024-05-14 16:37:13,804 DEBUG SenderThread:127609 [sender.py:send():378] send: footer
163
+ 2024-05-14 16:37:13,804 DEBUG SenderThread:127609 [sender.py:send_request():405] send_request: defer
164
+ 2024-05-14 16:37:13,804 INFO SenderThread:127609 [sender.py:send_request_defer():609] handle sender defer: 14
165
+ 2024-05-14 16:37:13,805 DEBUG HandlerThread:127609 [handler.py:handle_request():158] handle_request: poll_exit
166
+ 2024-05-14 16:37:13,805 DEBUG SenderThread:127609 [sender.py:send_request():405] send_request: poll_exit
167
+ 2024-05-14 16:37:13,805 DEBUG HandlerThread:127609 [handler.py:handle_request():158] handle_request: poll_exit
168
+ 2024-05-14 16:37:13,805 DEBUG HandlerThread:127609 [handler.py:handle_request():158] handle_request: server_info
169
+ 2024-05-14 16:37:13,805 DEBUG SenderThread:127609 [sender.py:send_request():405] send_request: poll_exit
170
+ 2024-05-14 16:37:13,806 DEBUG SenderThread:127609 [sender.py:send_request():405] send_request: server_info
171
+ 2024-05-14 16:37:13,807 DEBUG HandlerThread:127609 [handler.py:handle_request():158] handle_request: get_summary
172
+ 2024-05-14 16:37:13,807 DEBUG HandlerThread:127609 [handler.py:handle_request():158] handle_request: sampled_history
173
+ 2024-05-14 16:37:13,807 DEBUG HandlerThread:127609 [handler.py:handle_request():158] handle_request: internal_messages
174
+ 2024-05-14 16:37:13,868 INFO MainThread:127609 [wandb_run.py:_footer_history_summary_info():3994] rendering history
175
+ 2024-05-14 16:37:13,868 INFO MainThread:127609 [wandb_run.py:_footer_history_summary_info():4026] rendering summary
176
+ 2024-05-14 16:37:13,868 INFO MainThread:127609 [wandb_run.py:_footer_sync_info():3953] logging synced files
177
+ 2024-05-14 16:37:13,869 DEBUG HandlerThread:127609 [handler.py:handle_request():158] handle_request: shutdown
178
+ 2024-05-14 16:37:13,869 INFO HandlerThread:127609 [handler.py:finish():882] shutting down handler
179
+ 2024-05-14 16:37:14,806 INFO WriterThread:127609 [datastore.py:close():296] close: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163703-t88cgth5/run-t88cgth5.wandb
180
+ 2024-05-14 16:37:14,868 INFO SenderThread:127609 [sender.py:finish():1545] shutting down sender
181
+ 2024-05-14 16:37:14,868 INFO SenderThread:127609 [file_pusher.py:finish():169] shutting down file pusher
182
+ 2024-05-14 16:37:14,868 INFO SenderThread:127609 [file_pusher.py:join():175] waiting for file pusher
lm-evaluation-harness/wandb/run-20240514_163703-t88cgth5/logs/debug.log ADDED
@@ -0,0 +1,29 @@
1
+ 2024-05-14 16:37:03,645 INFO MainThread:126394 [wandb_setup.py:_flush():76] Current SDK version is 0.17.0
2
+ 2024-05-14 16:37:03,645 INFO MainThread:126394 [wandb_setup.py:_flush():76] Configure stats pid to 126394
3
+ 2024-05-14 16:37:03,645 INFO MainThread:126394 [wandb_setup.py:_flush():76] Loading settings from /root/.config/wandb/settings
4
+ 2024-05-14 16:37:03,645 INFO MainThread:126394 [wandb_setup.py:_flush():76] Loading settings from /data/cronscript/lm-evaluation-harness/wandb/settings
5
+ 2024-05-14 16:37:03,645 INFO MainThread:126394 [wandb_setup.py:_flush():76] Loading settings from environment variables: {}
6
+ 2024-05-14 16:37:03,645 INFO MainThread:126394 [wandb_setup.py:_flush():76] Applying setup settings: {'_disable_service': False}
7
+ 2024-05-14 16:37:03,645 WARNING MainThread:126394 [wandb_setup.py:_flush():76] Could not find program at -m lm_eval.__main__
8
+ 2024-05-14 16:37:03,645 INFO MainThread:126394 [wandb_setup.py:_flush():76] Inferring run settings from compute environment: {'program_relpath': None, 'program': '-m lm_eval.__main__'}
9
+ 2024-05-14 16:37:03,645 INFO MainThread:126394 [wandb_setup.py:_flush():76] Applying login settings: {}
10
+ 2024-05-14 16:37:03,646 INFO MainThread:126394 [wandb_init.py:_log_setup():520] Logging user logs to /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163703-t88cgth5/logs/debug.log
11
+ 2024-05-14 16:37:03,646 INFO MainThread:126394 [wandb_init.py:_log_setup():521] Logging internal logs to /data/cronscript/lm-evaluation-harness/wandb/run-20240514_163703-t88cgth5/logs/debug-internal.log
12
+ 2024-05-14 16:37:03,646 INFO MainThread:126394 [wandb_init.py:init():560] calling init triggers
13
+ 2024-05-14 16:37:03,646 INFO MainThread:126394 [wandb_init.py:init():567] wandb.init called with sweep_config: {}
14
+ config: {}
15
+ 2024-05-14 16:37:03,646 INFO MainThread:126394 [wandb_init.py:init():610] starting backend
16
+ 2024-05-14 16:37:03,646 INFO MainThread:126394 [wandb_init.py:init():614] setting up manager
17
+ 2024-05-14 16:37:03,647 INFO MainThread:126394 [backend.py:_multiprocessing_setup():105] multiprocessing start_methods=fork,spawn,forkserver, using: spawn
18
+ 2024-05-14 16:37:03,648 INFO MainThread:126394 [wandb_init.py:init():622] backend started and connected
19
+ 2024-05-14 16:37:03,651 INFO MainThread:126394 [wandb_init.py:init():711] updated telemetry
20
+ 2024-05-14 16:37:03,662 INFO MainThread:126394 [wandb_init.py:init():744] communicating run to backend with 90.0 second timeout
21
+ 2024-05-14 16:37:03,921 INFO MainThread:126394 [wandb_run.py:_on_init():2396] communicating current version
22
+ 2024-05-14 16:37:04,001 INFO MainThread:126394 [wandb_run.py:_on_init():2405] got version response
23
+ 2024-05-14 16:37:04,001 INFO MainThread:126394 [wandb_init.py:init():795] starting run threads in backend
24
+ 2024-05-14 16:37:04,201 INFO MainThread:126394 [wandb_run.py:_console_start():2374] atexit reg
25
+ 2024-05-14 16:37:04,201 INFO MainThread:126394 [wandb_run.py:_redirect():2229] redirect: wrap_raw
26
+ 2024-05-14 16:37:04,201 INFO MainThread:126394 [wandb_run.py:_redirect():2294] Wrapping output streams.
27
+ 2024-05-14 16:37:04,202 INFO MainThread:126394 [wandb_run.py:_redirect():2319] Redirects installed.
28
+ 2024-05-14 16:37:04,203 INFO MainThread:126394 [wandb_init.py:init():838] run started, returning control to user process
29
+ 2024-05-14 16:37:14,869 WARNING MsgRouterThr:126394 [router.py:message_loop():77] message_loop has been closed
lm-evaluation-harness/wandb/run-20240514_163703-t88cgth5/run-t88cgth5.wandb ADDED
Binary file (10.5 kB).
lm-evaluation-harness/wandb/run-20240523_051727-xsim9azn/files/config.yaml ADDED
@@ -0,0 +1,43 @@
1
+ wandb_version: 1
2
+
3
+ _wandb:
4
+ desc: null
5
+ value:
6
+ python_version: 3.10.12
7
+ cli_version: 0.17.0
8
+ framework: huggingface
9
+ huggingface_version: 4.41.1
10
+ is_jupyter_run: false
11
+ is_kaggle_kernel: false
12
+ start_time: 1716441447
13
+ t:
14
+ 1:
15
+ - 1
16
+ - 5
17
+ - 11
18
+ - 49
19
+ - 51
20
+ - 53
21
+ - 55
22
+ - 71
23
+ - 98
24
+ - 100
25
+ 2:
26
+ - 1
27
+ - 5
28
+ - 11
29
+ - 49
30
+ - 51
31
+ - 53
32
+ - 55
33
+ - 71
34
+ - 98
35
+ - 100
36
+ 3:
37
+ - 23
38
+ 4: 3.10.12
39
+ 5: 0.17.0
40
+ 6: 4.41.1
41
+ 8:
42
+ - 5
43
+ 13: linux-x86_64
lm-evaluation-harness/wandb/run-20240523_051727-xsim9azn/files/output.log ADDED
@@ -0,0 +1,34 @@
1
+
2
+ 2024-05-23:05:17:28,264 INFO [__main__.py:251] Verbosity set to INFO
3
+ 2024-05-23:05:17:32,782 INFO [__main__.py:335] Selected Tasks: ['arc_easy', 'hellaswag', 'mrpc', 'openbookqa', 'sst2', 'winogrande']
4
+ 2024-05-23:05:17:32,783 INFO [evaluator.py:131] Setting random seed to 0 | Setting numpy seed to 1234 | Setting torch manual seed to 1234
5
+ 2024-05-23:05:17:32,783 INFO [evaluator.py:177] Initializing hf model, with arguments: {'pretrained': '/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step16000'}
6
+ 2024-05-23:05:17:34,013 INFO [huggingface.py:164] Using device 'cuda'
7
+ Traceback (most recent call last):
8
+ File "/usr/lib/python3.10/runpy.py", line 196, in _run_module_as_main
9
+ return _run_code(code, main_globals, None,
10
+ File "/usr/lib/python3.10/runpy.py", line 86, in _run_code
11
+ exec(code, run_globals)
12
+ File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 417, in <module>
13
+ cli_evaluate()
14
+ File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 341, in cli_evaluate
15
+ results = evaluator.simple_evaluate(
16
+ File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/utils.py", line 288, in _wrapper
17
+ return fn(*args, **kwargs)
18
+ File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/evaluator.py", line 180, in simple_evaluate
19
+ lm = lm_eval.api.registry.get_model(model).create_from_arg_string(
20
+ File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/api/model.py", line 134, in create_from_arg_string
21
+ return cls(**args, **args2)
22
+ File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 190, in __init__
23
+ self._get_config(
24
+ File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 471, in _get_config
25
+ self._config = transformers.AutoConfig.from_pretrained(
26
+ File "/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py", line 934, in from_pretrained
27
+ config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs)
28
+ File "/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 632, in get_config_dict
29
+ config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs)
30
+ File "/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 689, in _get_config_dict
31
+ resolved_config_file = cached_file(
32
+ File "/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py", line 370, in cached_file
33
+ raise EnvironmentError(
34
+ OSError: /mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step16000 does not appear to have a file named config.json. Checkout 'https://huggingface.co//mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step16000/tree/main' for available files.
lm-evaluation-harness/wandb/run-20240523_051727-xsim9azn/files/requirements.txt ADDED
@@ -0,0 +1,155 @@
1
+ DataProperty==1.0.1
2
+ GitPython==3.1.43
3
+ Jinja2==3.1.4
4
+ Markdown==3.6
5
+ MarkupSafe==2.1.5
6
+ Pillow-SIMD==7.0.0.post3
7
+ PyYAML==6.0
8
+ Werkzeug==3.0.3
9
+ absl-py==2.1.0
10
+ accelerate==0.30.1
11
+ aiohttp==3.9.5
12
+ aiosignal==1.3.1
13
+ async-timeout==4.0.3
14
+ attrs==23.2.0
15
+ av==9.2.0
16
+ cachetools==5.3.3
17
+ certifi==2024.2.2
18
+ cffi==1.15.1
19
+ cfgv==3.4.0
20
+ chardet==5.2.0
21
+ charset-normalizer==3.3.2
22
+ click==8.1.7
23
+ cmake==3.29.2
24
+ colorama==0.4.6
25
+ datasets==2.19.1
26
+ deepspeed==0.12.4+hpu.synapse.v1.15.1
27
+ dill==0.3.8
28
+ distlib==0.3.8
29
+ docker-pycreds==0.4.0
30
+ einops==0.8.0
31
+ evaluate==0.4.2
32
+ exceptiongroup==1.2.1
33
+ expecttest==0.2.1
34
+ filelock==3.14.0
35
+ frozenlist==1.4.1
36
+ fsspec==2024.3.1
37
+ gitdb==4.0.11
38
+ google-auth-oauthlib==0.4.6
39
+ google-auth==2.29.0
40
+ grpcio==1.63.0
41
+ habana-media-loader==1.15.1.15
42
+ habana-pyhlml==1.15.1.15
43
+ habana-torch-dataloader==1.15.1.15
44
+ habana-torch-plugin==1.15.1.15
45
+ habana_gpu_migration==1.15.1.15
46
+ habana_quantization_toolkit==1.15.1.15
47
+ hjson==3.1.0
48
+ huggingface-hub==0.23.1
49
+ identify==2.5.36
50
+ idna==3.7
51
+ iniconfig==2.0.0
52
+ joblib==1.4.2
53
+ jsonlines==4.0.0
54
+ lightning-habana==1.4.0
55
+ lightning-utilities==0.11.2
56
+ lightning==2.2.0.post0
57
+ lm_eval==0.4.2
58
+ lm_eval==0.4.2
59
+ lm_eval==0.4.2
60
+ lxml==5.2.2
61
+ mbstrdecoder==1.1.3
62
+ more-itertools==10.2.0
63
+ mpi4py==3.1.4
64
+ mpmath==1.3.0
65
+ multidict==6.0.5
66
+ multiprocess==0.70.16
67
+ networkx==3.3
68
+ ninja==1.11.1.1
69
+ nltk==3.8.1
70
+ nodeenv==1.8.0
71
+ numexpr==2.10.0
72
+ numpy==1.23.5
73
+ oauthlib==3.2.2
74
+ packaging==24.0
75
+ pandas==2.0.1
76
+ pathspec==0.12.1
77
+ pathvalidate==3.2.0
78
+ peft==0.11.1
79
+ perfetto==0.7.0
80
+ pillow==10.3.0
81
+ pip==22.0.2
82
+ pip==23.3.1
83
+ platformdirs==4.2.1
84
+ pluggy==1.5.0
85
+ portalocker==2.8.2
86
+ pre-commit==3.3.3
87
+ pretty-errors==1.2.25
88
+ protobuf==3.20.3
89
+ psutil==5.9.8
90
+ py-cpuinfo==9.0.0
91
+ pyarrow-hotfix==0.6
92
+ pyarrow==16.1.0
93
+ pyasn1==0.6.0
94
+ pyasn1_modules==0.4.0
95
+ pybind11==2.10.4
96
+ pycparser==2.22
97
+ pydantic==1.10.13
98
+ pynvml==8.0.4
99
+ pytablewriter==1.2.0
100
+ pytest==8.2.0
101
+ python-dateutil==2.9.0.post0
102
+ pytorch-lightning==2.2.4
103
+ pytz==2024.1
104
+ regex==2023.5.5
105
+ requests-oauthlib==2.0.0
106
+ requests==2.31.0
107
+ rouge_score==0.1.2
108
+ rsa==4.9
109
+ sacrebleu==2.4.2
110
+ safetensors==0.4.3
111
+ scikit-learn==1.5.0
112
+ scipy==1.13.1
113
+ sentencepiece==0.2.0
114
+ sentry-sdk==2.2.1
115
+ setproctitle==1.3.3
116
+ setuptools==59.6.0
117
+ setuptools==69.5.1
118
+ six==1.16.0
119
+ smmap==5.0.1
120
+ sqlitedict==2.1.0
121
+ symengine==0.11.0
122
+ sympy==1.12
123
+ tabledata==1.3.3
124
+ tabulate==0.9.0
125
+ tcolorpy==0.1.6
126
+ tdqm==0.0.1
127
+ tensorboard-data-server==0.6.1
128
+ tensorboard-plugin-wit==1.8.1
129
+ tensorboard==2.11.2
130
+ threadpoolctl==3.5.0
131
+ tokenizers==0.19.1
132
+ tomli==2.0.1
133
+ torch==2.2.0a0+git8964477
134
+ torch_tb_profiler==0.4.0
135
+ torchaudio==2.2.0+08901ad
136
+ torchdata==0.7.1+5e6f7b7
137
+ torchmetrics==1.4.0
138
+ torchtext==0.17.0+400da5c
139
+ torchvision==0.17.0+b2383d4
140
+ tqdm-multiprocess==0.0.11
141
+ tqdm==4.66.4
142
+ transformers==4.41.1
143
+ typepy==1.3.2
144
+ typing_extensions==4.11.0
145
+ tzdata==2024.1
146
+ urllib3==1.26.18
147
+ virtualenv==20.26.1
148
+ wandb==0.17.0
149
+ wheel==0.37.1
150
+ wheel==0.43.0
151
+ word2number==1.1
152
+ xxhash==3.4.1
153
+ yamllint==1.35.1
154
+ yarl==1.9.4
155
+ zstandard==0.22.0
lm-evaluation-harness/wandb/run-20240523_051727-xsim9azn/files/wandb-metadata.json ADDED
@@ -0,0 +1,850 @@
1
+ {
2
+ "os": "Linux-5.15.0-92-generic-x86_64-with-glibc2.35",
3
+ "python": "3.10.12",
4
+ "heartbeatAt": "2024-05-23T05:17:28.136110",
5
+ "startedAt": "2024-05-23T05:17:27.718901",
6
+ "docker": null,
7
+ "cuda": null,
8
+ "args": [
9
+ "--model",
10
+ "hf",
11
+ "--model_args",
12
+ "pretrained=/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step16000",
13
+ "--tasks",
14
+ "hellaswag,arc_easy,openbookqa,winogrande,sst2,mrpc",
15
+ "--batch_size",
16
+ "auto",
17
+ "--wandb_args",
18
+ "project=bharatgpt,group=trial_expt_2"
19
+ ],
20
+ "state": "running",
21
+ "program": "-m lm_eval.__main__",
22
+ "codePathLocal": null,
23
+ "git": {
24
+ "remote": "https://github.com/EleutherAI/lm-evaluation-harness",
25
+ "commit": null
26
+ },
27
+ "email": null,
28
+ "root": "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness",
29
+ "host": "peacock-evaluation-debug-worker-0",
30
+ "username": "root",
31
+ "executable": "/usr/bin/python3",
32
+ "cpu_count": 80,
33
+ "cpu_count_logical": 160,
34
+ "cpu_freq": {
35
+ "current": 2333.75651875,
36
+ "min": 800.0,
37
+ "max": 3400.0
38
+ },
39
+ "cpu_freq_per_core": [
40
+ {
41
+ "current": 3327.471,
42
+ "min": 800.0,
43
+ "max": 3400.0
44
+ },
45
+ {
46
+ "current": 3327.443,
47
+ "min": 800.0,
48
+ "max": 3400.0
49
+ },
50
+ {
51
+ "current": 2300.0,
52
+ "min": 800.0,
53
+ "max": 3400.0
54
+ },
55
+ {
56
+ "current": 2300.0,
57
+ "min": 800.0,
58
+ "max": 3400.0
59
+ },
60
+ {
61
+ "current": 2300.0,
62
+ "min": 800.0,
63
+ "max": 3400.0
64
+ },
65
+ {
66
+ "current": 2300.0,
67
+ "min": 800.0,
68
+ "max": 3400.0
69
+ },
70
+ {
71
+ "current": 2300.0,
72
+ "min": 800.0,
73
+ "max": 3400.0
74
+ },
75
+ {
76
+ "current": 2300.0,
77
+ "min": 800.0,
78
+ "max": 3400.0
79
+ },
80
+ {
81
+ "current": 2300.0,
82
+ "min": 800.0,
83
+ "max": 3400.0
84
+ },
85
+ {
86
+ "current": 2300.0,
87
+ "min": 800.0,
88
+ "max": 3400.0
89
+ },
90
+ {
91
+ "current": 2300.0,
92
+ "min": 800.0,
93
+ "max": 3400.0
94
+ },
95
+ {
96
+ "current": 2300.0,
97
+ "min": 800.0,
98
+ "max": 3400.0
99
+ },
100
+ {
101
+ "current": 2300.0,
102
+ "min": 800.0,
103
+ "max": 3400.0
104
+ },
105
+ {
106
+ "current": 2300.0,
107
+ "min": 800.0,
108
+ "max": 3400.0
109
+ },
110
+ {
111
+ "current": 2300.0,
112
+ "min": 800.0,
113
+ "max": 3400.0
114
+ },
115
+ {
116
+ "current": 2300.0,
117
+ "min": 800.0,
118
+ "max": 3400.0
119
+ },
120
+ {
121
+ "current": 2300.0,
122
+ "min": 800.0,
123
+ "max": 3400.0
124
+ },
125
+ {
126
+ "current": 2300.0,
127
+ "min": 800.0,
128
+ "max": 3400.0
129
+ },
130
+ {
131
+ "current": 2300.0,
132
+ "min": 800.0,
133
+ "max": 3400.0
134
+ },
135
+ {
136
+ "current": 2300.0,
137
+ "min": 800.0,
138
+ "max": 3400.0
139
+ },
140
+ {
141
+ "current": 2300.0,
142
+ "min": 800.0,
143
+ "max": 3400.0
144
+ },
145
+ {
146
+ "current": 2300.0,
147
+ "min": 800.0,
148
+ "max": 3400.0
149
+ },
150
+ {
151
+ "current": 2300.0,
152
+ "min": 800.0,
153
+ "max": 3400.0
154
+ },
155
+ {
156
+ "current": 2300.0,
157
+ "min": 800.0,
158
+ "max": 3400.0
159
+ },
160
+ {
161
+ "current": 2300.0,
162
+ "min": 800.0,
163
+ "max": 3400.0
164
+ },
165
+ {
166
+ "current": 2300.0,
167
+ "min": 800.0,
168
+ "max": 3400.0
169
+ },
170
+ {
171
+ "current": 2300.0,
172
+ "min": 800.0,
173
+ "max": 3400.0
174
+ },
175
+ {
176
+ "current": 2300.0,
177
+ "min": 800.0,
178
+ "max": 3400.0
179
+ },
180
+ {
181
+ "current": 2300.0,
182
+ "min": 800.0,
183
+ "max": 3400.0
184
+ },
185
+ {
186
+ "current": 2300.0,
187
+ "min": 800.0,
188
+ "max": 3400.0
189
+ },
190
+ {
191
+ "current": 2300.0,
192
+ "min": 800.0,
193
+ "max": 3400.0
194
+ },
195
+ {
196
+ "current": 2300.0,
197
+ "min": 800.0,
198
+ "max": 3400.0
199
+ },
200
+ {
201
+ "current": 2300.0,
202
+ "min": 800.0,
203
+ "max": 3400.0
204
+ },
205
+ {
206
+ "current": 2300.0,
207
+ "min": 800.0,
208
+ "max": 3400.0
209
+ },
210
+ {
211
+ "current": 2300.0,
212
+ "min": 800.0,
213
+ "max": 3400.0
214
+ },
215
+ {
216
+ "current": 2300.0,
217
+ "min": 800.0,
218
+ "max": 3400.0
219
+ },
220
+ {
221
+ "current": 2300.0,
222
+ "min": 800.0,
223
+ "max": 3400.0
224
+ },
225
+ {
226
+ "current": 2300.0,
227
+ "min": 800.0,
228
+ "max": 3400.0
229
+ },
230
+ {
231
+ "current": 2300.0,
232
+ "min": 800.0,
233
+ "max": 3400.0
234
+ },
235
+ {
236
+ "current": 2300.0,
237
+ "min": 800.0,
238
+ "max": 3400.0
239
+ },
240
+ {
241
+ "current": 3399.275,
242
+ "min": 800.0,
243
+ "max": 3400.0
244
+ },
245
+ {
246
+ "current": 2300.0,
247
+ "min": 800.0,
248
+ "max": 3400.0
249
+ },
250
+ {
251
+ "current": 2300.0,
252
+ "min": 800.0,
253
+ "max": 3400.0
254
+ },
255
+ {
256
+ "current": 2300.0,
257
+ "min": 800.0,
258
+ "max": 3400.0
259
+ },
260
+ {
261
+ "current": 2300.0,
262
+ "min": 800.0,
263
+ "max": 3400.0
264
+ },
265
+ {
266
+ "current": 2300.0,
267
+ "min": 800.0,
268
+ "max": 3400.0
269
+ },
270
+ {
271
+ "current": 2300.0,
272
+ "min": 800.0,
273
+ "max": 3400.0
274
+ },
275
+ {
276
+ "current": 2300.0,
277
+ "min": 800.0,
278
+ "max": 3400.0
279
+ },
280
+ {
281
+ "current": 2300.0,
282
+ "min": 800.0,
283
+ "max": 3400.0
284
+ },
285
+ {
286
+ "current": 2300.0,
287
+ "min": 800.0,
288
+ "max": 3400.0
289
+ },
290
+ {
291
+ "current": 2300.0,
292
+ "min": 800.0,
293
+ "max": 3400.0
294
+ },
295
+ {
296
+ "current": 2300.0,
297
+ "min": 800.0,
298
+ "max": 3400.0
299
+ },
300
+ {
301
+ "current": 2300.0,
302
+ "min": 800.0,
303
+ "max": 3400.0
304
+ },
305
+ {
306
+ "current": 2300.0,
307
+ "min": 800.0,
308
+ "max": 3400.0
309
+ },
310
+ {
311
+ "current": 2300.0,
312
+ "min": 800.0,
313
+ "max": 3400.0
314
+ },
315
+ {
316
+ "current": 2300.0,
317
+ "min": 800.0,
318
+ "max": 3400.0
319
+ },
320
+ {
321
+ "current": 2300.0,
322
+ "min": 800.0,
323
+ "max": 3400.0
324
+ },
325
+ {
326
+ "current": 2300.0,
327
+ "min": 800.0,
328
+ "max": 3400.0
329
+ },
330
+ {
331
+ "current": 2300.0,
332
+ "min": 800.0,
333
+ "max": 3400.0
334
+ },
335
+ {
336
+ "current": 2300.0,
337
+ "min": 800.0,
338
+ "max": 3400.0
339
+ },
340
+ {
341
+ "current": 2300.0,
342
+ "min": 800.0,
343
+ "max": 3400.0
344
+ },
345
+ {
346
+ "current": 2300.0,
347
+ "min": 800.0,
348
+ "max": 3400.0
349
+ },
350
+ {
351
+ "current": 2300.0,
352
+ "min": 800.0,
353
+ "max": 3400.0
354
+ },
355
+ {
356
+ "current": 2300.0,
357
+ "min": 800.0,
358
+ "max": 3400.0
359
+ },
360
+ {
361
+ "current": 2300.0,
362
+ "min": 800.0,
363
+ "max": 3400.0
364
+ },
365
+ {
366
+ "current": 2300.0,
367
+ "min": 800.0,
368
+ "max": 3400.0
369
+ },
370
+ {
371
+ "current": 2300.0,
372
+ "min": 800.0,
373
+ "max": 3400.0
374
+ },
375
+ {
376
+ "current": 2300.0,
377
+ "min": 800.0,
378
+ "max": 3400.0
379
+ },
380
+ {
381
+ "current": 2300.0,
382
+ "min": 800.0,
383
+ "max": 3400.0
384
+ },
385
+ {
386
+ "current": 2300.0,
387
+ "min": 800.0,
388
+ "max": 3400.0
389
+ },
390
+ {
391
+ "current": 2300.0,
392
+ "min": 800.0,
393
+ "max": 3400.0
394
+ },
395
+ {
396
+ "current": 2300.0,
397
+ "min": 800.0,
398
+ "max": 3400.0
399
+ },
400
+ {
401
+ "current": 2300.0,
402
+ "min": 800.0,
403
+ "max": 3400.0
404
+ },
405
+ {
406
+ "current": 2300.0,
407
+ "min": 800.0,
408
+ "max": 3400.0
409
+ },
410
+ {
411
+ "current": 2300.0,
412
+ "min": 800.0,
413
+ "max": 3400.0
414
+ },
415
+ {
416
+ "current": 2300.0,
417
+ "min": 800.0,
418
+ "max": 3400.0
419
+ },
420
+ {
421
+ "current": 2300.0,
422
+ "min": 800.0,
423
+ "max": 3400.0
424
+ },
425
+ {
426
+ "current": 2300.0,
427
+ "min": 800.0,
428
+ "max": 3400.0
429
+ },
430
+ {
431
+ "current": 2300.0,
432
+ "min": 800.0,
433
+ "max": 3400.0
434
+ },
435
+ {
436
+ "current": 2300.0,
437
+ "min": 800.0,
438
+ "max": 3400.0
439
+ },
440
+ {
441
+ "current": 3325.804,
442
+ "min": 800.0,
443
+ "max": 3400.0
444
+ },
445
+ {
446
+ "current": 2300.0,
447
+ "min": 800.0,
448
+ "max": 3400.0
449
+ },
450
+ {
451
+ "current": 2300.0,
452
+ "min": 800.0,
453
+ "max": 3400.0
454
+ },
455
+ {
456
+ "current": 2300.0,
457
+ "min": 800.0,
458
+ "max": 3400.0
459
+ },
460
+ {
461
+ "current": 2300.0,
462
+ "min": 800.0,
463
+ "max": 3400.0
464
+ },
465
+ {
466
+ "current": 2300.0,
467
+ "min": 800.0,
468
+ "max": 3400.0
469
+ },
470
+ {
471
+ "current": 2300.0,
472
+ "min": 800.0,
473
+ "max": 3400.0
474
+ },
475
+ {
476
+ "current": 2300.0,
477
+ "min": 800.0,
478
+ "max": 3400.0
479
+ },
480
+ {
481
+ "current": 2300.0,
482
+ "min": 800.0,
483
+ "max": 3400.0
484
+ },
485
+ {
486
+ "current": 2300.0,
487
+ "min": 800.0,
488
+ "max": 3400.0
489
+ },
490
+ {
491
+ "current": 2300.0,
492
+ "min": 800.0,
493
+ "max": 3400.0
494
+ },
495
+ {
496
+ "current": 2300.0,
497
+ "min": 800.0,
498
+ "max": 3400.0
499
+ },
500
+ {
501
+ "current": 2300.0,
502
+ "min": 800.0,
503
+ "max": 3400.0
504
+ },
505
+ {
506
+ "current": 2300.0,
507
+ "min": 800.0,
508
+ "max": 3400.0
509
+ },
510
+ {
511
+ "current": 2300.0,
512
+ "min": 800.0,
513
+ "max": 3400.0
514
+ },
515
+ {
516
+ "current": 2300.0,
517
+ "min": 800.0,
518
+ "max": 3400.0
519
+ },
520
+ {
521
+ "current": 2300.0,
522
+ "min": 800.0,
523
+ "max": 3400.0
524
+ },
525
+ {
526
+ "current": 2300.0,
527
+ "min": 800.0,
528
+ "max": 3400.0
529
+ },
530
+ {
531
+ "current": 2300.0,
532
+ "min": 800.0,
533
+ "max": 3400.0
534
+ },
535
+ {
536
+ "current": 2300.0,
537
+ "min": 800.0,
538
+ "max": 3400.0
539
+ },
540
+ {
541
+ "current": 2300.0,
542
+ "min": 800.0,
543
+ "max": 3400.0
544
+ },
545
+ {
546
+ "current": 2300.0,
547
+ "min": 800.0,
548
+ "max": 3400.0
549
+ },
550
+ {
551
+ "current": 2300.0,
552
+ "min": 800.0,
553
+ "max": 3400.0
554
+ },
555
+ {
556
+ "current": 2300.0,
557
+ "min": 800.0,
558
+ "max": 3400.0
559
+ },
560
+ {
561
+ "current": 2300.0,
562
+ "min": 800.0,
563
+ "max": 3400.0
564
+ },
565
+ {
566
+ "current": 2300.0,
567
+ "min": 800.0,
568
+ "max": 3400.0
569
+ },
570
+ {
571
+ "current": 2300.0,
572
+ "min": 800.0,
573
+ "max": 3400.0
574
+ },
575
+ {
576
+ "current": 2300.0,
577
+ "min": 800.0,
578
+ "max": 3400.0
579
+ },
580
+ {
581
+ "current": 2300.0,
582
+ "min": 800.0,
583
+ "max": 3400.0
584
+ },
585
+ {
586
+ "current": 2300.0,
587
+ "min": 800.0,
588
+ "max": 3400.0
589
+ },
590
+ {
591
+ "current": 2300.0,
592
+ "min": 800.0,
593
+ "max": 3400.0
594
+ },
595
+ {
596
+ "current": 2300.0,
597
+ "min": 800.0,
598
+ "max": 3400.0
599
+ },
600
+ {
601
+ "current": 2300.0,
602
+ "min": 800.0,
603
+ "max": 3400.0
604
+ },
605
+ {
606
+ "current": 2300.0,
607
+ "min": 800.0,
608
+ "max": 3400.0
609
+ },
610
+ {
611
+ "current": 2300.0,
612
+ "min": 800.0,
613
+ "max": 3400.0
614
+ },
615
+ {
616
+ "current": 2300.0,
617
+ "min": 800.0,
618
+ "max": 3400.0
619
+ },
620
+ {
621
+ "current": 2300.0,
622
+ "min": 800.0,
623
+ "max": 3400.0
624
+ },
625
+ {
626
+ "current": 2300.0,
627
+ "min": 800.0,
628
+ "max": 3400.0
629
+ },
630
+ {
631
+ "current": 2300.0,
632
+ "min": 800.0,
633
+ "max": 3400.0
634
+ },
635
+ {
636
+ "current": 2300.0,
637
+ "min": 800.0,
638
+ "max": 3400.0
639
+ },
640
+ {
641
+ "current": 2300.0,
642
+ "min": 800.0,
643
+ "max": 3400.0
644
+ },
645
+ {
646
+ "current": 2300.0,
647
+ "min": 800.0,
648
+ "max": 3400.0
649
+ },
650
+ {
651
+ "current": 2300.0,
652
+ "min": 800.0,
653
+ "max": 3400.0
654
+ },
655
+ {
656
+ "current": 2300.0,
657
+ "min": 800.0,
658
+ "max": 3400.0
659
+ },
660
+ {
661
+ "current": 2300.0,
662
+ "min": 800.0,
663
+ "max": 3400.0
664
+ },
665
+ {
666
+ "current": 2300.0,
667
+ "min": 800.0,
668
+ "max": 3400.0
669
+ },
670
+ {
671
+ "current": 2300.0,
672
+ "min": 800.0,
673
+ "max": 3400.0
674
+ },
675
+ {
676
+ "current": 2300.0,
677
+ "min": 800.0,
678
+ "max": 3400.0
679
+ },
680
+ {
681
+ "current": 2300.0,
682
+ "min": 800.0,
683
+ "max": 3400.0
684
+ },
685
+ {
686
+ "current": 2300.0,
687
+ "min": 800.0,
688
+ "max": 3400.0
689
+ },
690
+ {
691
+ "current": 2300.0,
692
+ "min": 800.0,
693
+ "max": 3400.0
694
+ },
695
+ {
696
+ "current": 2300.0,
697
+ "min": 800.0,
698
+ "max": 3400.0
699
+ },
700
+ {
701
+ "current": 2300.0,
702
+ "min": 800.0,
703
+ "max": 3400.0
704
+ },
705
+ {
706
+ "current": 2300.0,
707
+ "min": 800.0,
708
+ "max": 3400.0
709
+ },
710
+ {
711
+ "current": 2300.0,
712
+ "min": 800.0,
713
+ "max": 3400.0
714
+ },
715
+ {
716
+ "current": 2300.0,
717
+ "min": 800.0,
718
+ "max": 3400.0
719
+ },
720
+ {
721
+ "current": 2300.0,
722
+ "min": 800.0,
723
+ "max": 3400.0
724
+ },
725
+ {
726
+ "current": 2300.0,
727
+ "min": 800.0,
728
+ "max": 3400.0
729
+ },
730
+ {
731
+ "current": 2300.0,
732
+ "min": 800.0,
733
+ "max": 3400.0
734
+ },
735
+ {
736
+ "current": 2300.0,
737
+ "min": 800.0,
738
+ "max": 3400.0
739
+ },
740
+ {
741
+ "current": 2300.0,
742
+ "min": 800.0,
743
+ "max": 3400.0
744
+ },
745
+ {
746
+ "current": 2300.0,
747
+ "min": 800.0,
748
+ "max": 3400.0
749
+ },
750
+ {
751
+ "current": 2300.0,
752
+ "min": 800.0,
753
+ "max": 3400.0
754
+ },
755
+ {
756
+ "current": 2300.0,
757
+ "min": 800.0,
758
+ "max": 3400.0
759
+ },
760
+ {
761
+ "current": 2300.0,
762
+ "min": 800.0,
763
+ "max": 3400.0
764
+ },
765
+ {
766
+ "current": 3399.225,
767
+ "min": 800.0,
768
+ "max": 3400.0
769
+ },
770
+ {
771
+ "current": 2300.0,
772
+ "min": 800.0,
773
+ "max": 3400.0
774
+ },
775
+ {
776
+ "current": 2300.0,
777
+ "min": 800.0,
778
+ "max": 3400.0
779
+ },
780
+ {
781
+ "current": 2300.0,
782
+ "min": 800.0,
783
+ "max": 3400.0
784
+ },
785
+ {
786
+ "current": 2300.0,
787
+ "min": 800.0,
788
+ "max": 3400.0
789
+ },
790
+ {
791
+ "current": 2300.0,
792
+ "min": 800.0,
793
+ "max": 3400.0
794
+ },
795
+ {
796
+ "current": 2300.0,
797
+ "min": 800.0,
798
+ "max": 3400.0
799
+ },
800
+ {
801
+ "current": 2300.0,
802
+ "min": 800.0,
803
+ "max": 3400.0
804
+ },
805
+ {
806
+ "current": 2300.0,
807
+ "min": 800.0,
808
+ "max": 3400.0
809
+ },
810
+ {
811
+ "current": 2300.0,
812
+ "min": 800.0,
813
+ "max": 3400.0
814
+ },
815
+ {
816
+ "current": 2300.0,
817
+ "min": 800.0,
818
+ "max": 3400.0
819
+ },
820
+ {
821
+ "current": 2300.0,
822
+ "min": 800.0,
823
+ "max": 3400.0
824
+ },
825
+ {
826
+ "current": 2300.0,
827
+ "min": 800.0,
828
+ "max": 3400.0
829
+ },
830
+ {
831
+ "current": 2300.0,
832
+ "min": 800.0,
833
+ "max": 3400.0
834
+ },
835
+ {
836
+ "current": 2300.0,
837
+ "min": 800.0,
838
+ "max": 3400.0
839
+ }
840
+ ],
841
+ "disk": {
842
+ "/": {
843
+ "total": 877.6341285705566,
844
+ "used": 212.18668365478516
845
+ }
846
+ },
847
+ "memory": {
848
+ "total": 1007.43798828125
849
+ }
850
+ }
lm-evaluation-harness/wandb/run-20240523_051727-xsim9azn/files/wandb-summary.json ADDED
@@ -0,0 +1 @@
1
+ {"_wandb": {"runtime": 5}}
lm-evaluation-harness/wandb/run-20240523_051727-xsim9azn/logs/debug-internal.log ADDED
@@ -0,0 +1,182 @@
1
+ 2024-05-23 05:17:27,734 INFO StreamThr :9831 [internal.py:wandb_internal():85] W&B internal server running at pid: 9831, started at: 2024-05-23 05:17:27.733399
2
+ 2024-05-23 05:17:27,735 DEBUG HandlerThread:9831 [handler.py:handle_request():158] handle_request: status
3
+ 2024-05-23 05:17:27,736 INFO WriterThread:9831 [datastore.py:open_for_write():87] open: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_051727-xsim9azn/run-xsim9azn.wandb
4
+ 2024-05-23 05:17:27,737 DEBUG SenderThread:9831 [sender.py:send():378] send: header
5
+ 2024-05-23 05:17:27,747 DEBUG SenderThread:9831 [sender.py:send():378] send: run
6
+ 2024-05-23 05:17:27,968 INFO SenderThread:9831 [dir_watcher.py:__init__():211] watching files in: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_051727-xsim9azn/files
7
+ 2024-05-23 05:17:27,968 INFO SenderThread:9831 [sender.py:_start_run_threads():1123] run started: xsim9azn with start time 1716441447.73305
8
+ 2024-05-23 05:17:27,975 DEBUG HandlerThread:9831 [handler.py:handle_request():158] handle_request: check_version
9
+ 2024-05-23 05:17:27,975 DEBUG SenderThread:9831 [sender.py:send_request():405] send_request: check_version
10
+ 2024-05-23 05:17:28,060 DEBUG HandlerThread:9831 [handler.py:handle_request():158] handle_request: run_start
11
+ 2024-05-23 05:17:28,062 DEBUG HandlerThread:9831 [system_info.py:__init__():26] System info init
12
+ 2024-05-23 05:17:28,062 DEBUG HandlerThread:9831 [system_info.py:__init__():41] System info init done
13
+ 2024-05-23 05:17:28,062 INFO HandlerThread:9831 [system_monitor.py:start():194] Starting system monitor
14
+ 2024-05-23 05:17:28,062 INFO SystemMonitor:9831 [system_monitor.py:_start():158] Starting system asset monitoring threads
15
+ 2024-05-23 05:17:28,062 INFO HandlerThread:9831 [system_monitor.py:probe():214] Collecting system info
16
+ 2024-05-23 05:17:28,063 INFO SystemMonitor:9831 [interfaces.py:start():188] Started cpu monitoring
17
+ 2024-05-23 05:17:28,063 INFO SystemMonitor:9831 [interfaces.py:start():188] Started disk monitoring
18
+ 2024-05-23 05:17:28,064 INFO SystemMonitor:9831 [interfaces.py:start():188] Started memory monitoring
19
+ 2024-05-23 05:17:28,064 INFO SystemMonitor:9831 [interfaces.py:start():188] Started network monitoring
20
+ 2024-05-23 05:17:28,136 DEBUG HandlerThread:9831 [system_info.py:probe():150] Probing system
21
+ 2024-05-23 05:17:28,144 DEBUG HandlerThread:9831 [system_info.py:_probe_git():135] Probing git
22
+ 2024-05-23 05:17:28,163 ERROR HandlerThread:9831 [gitlib.py:root():92] git root error: Cmd('git') failed due to: exit code(128)
23
+ cmdline: git rev-parse --show-toplevel
24
+ stderr: 'fatal: detected dubious ownership in repository at '/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness'
25
+ To add an exception for this directory, call:
26
+
27
+ git config --global --add safe.directory /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness'
28
+ 2024-05-23 05:17:28,163 DEBUG HandlerThread:9831 [system_info.py:_probe_git():143] Probing git done
29
+ 2024-05-23 05:17:28,163 DEBUG HandlerThread:9831 [system_info.py:probe():198] Probing system done
30
+ 2024-05-23 05:17:28,163 DEBUG HandlerThread:9831 [system_monitor.py:probe():223] {'os': 'Linux-5.15.0-92-generic-x86_64-with-glibc2.35', 'python': '3.10.12', 'heartbeatAt': '2024-05-23T05:17:28.136110', 'startedAt': '2024-05-23T05:17:27.718901', 'docker': None, 'cuda': None, 'args': ('--model', 'hf', '--model_args', 'pretrained=/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step16000', '--tasks', 'hellaswag,arc_easy,openbookqa,winogrande,sst2,mrpc', '--batch_size', 'auto', '--wandb_args', 'project=bharatgpt,group=trial_expt_2'), 'state': 'running', 'program': '-m lm_eval.__main__', 'codePathLocal': None, 'git': {'remote': 'https://github.com/EleutherAI/lm-evaluation-harness', 'commit': None}, 'email': None, 'root': '/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness', 'host': 'peacock-evaluation-debug-worker-0', 'username': 'root', 'executable': '/usr/bin/python3', 'cpu_count': 80, 'cpu_count_logical': 160, 'cpu_freq': {'current': 2333.75651875, 'min': 800.0, 'max': 3400.0}, 'cpu_freq_per_core': [{'current': 3327.471, 'min': 800.0, 'max': 3400.0}, {'current': 3327.443, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3399.275, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, 
{'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3325.804, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, 
{'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3399.225, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}], 'disk': {'/': {'total': 877.6341285705566, 'used': 212.18668365478516}}, 'memory': {'total': 1007.43798828125}}
31
+ 2024-05-23 05:17:28,163 INFO HandlerThread:9831 [system_monitor.py:probe():224] Finished collecting system info
32
+ 2024-05-23 05:17:28,163 INFO HandlerThread:9831 [system_monitor.py:probe():227] Publishing system info
33
+ 2024-05-23 05:17:28,165 INFO HandlerThread:9831 [system_monitor.py:probe():229] Finished publishing system info
34
+ 2024-05-23 05:17:28,169 DEBUG SenderThread:9831 [sender.py:send():378] send: files
35
+ 2024-05-23 05:17:28,169 INFO SenderThread:9831 [sender.py:_save_file():1389] saving file wandb-metadata.json with policy now
36
+ 2024-05-23 05:17:28,260 DEBUG HandlerThread:9831 [handler.py:handle_request():158] handle_request: python_packages
37
+ 2024-05-23 05:17:28,261 DEBUG SenderThread:9831 [sender.py:send_request():405] send_request: python_packages
38
+ 2024-05-23 05:17:28,261 DEBUG HandlerThread:9831 [handler.py:handle_request():158] handle_request: stop_status
39
+ 2024-05-23 05:17:28,262 DEBUG SenderThread:9831 [sender.py:send_request():405] send_request: stop_status
40
+ 2024-05-23 05:17:28,376 DEBUG SenderThread:9831 [sender.py:send():378] send: telemetry
41
+ 2024-05-23 05:17:28,755 INFO wandb-upload_0:9831 [upload_job.py:push():130] Uploaded file /tmp/tmp_3wqr1puwandb/c3u76i0j-wandb-metadata.json
42
+ 2024-05-23 05:17:28,970 INFO Thread-12 :9831 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_051727-xsim9azn/files/requirements.txt
43
+ 2024-05-23 05:17:28,970 INFO Thread-12 :9831 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_051727-xsim9azn/files/output.log
44
+ 2024-05-23 05:17:28,970 INFO Thread-12 :9831 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_051727-xsim9azn/files/wandb-metadata.json
45
+ 2024-05-23 05:17:30,971 INFO Thread-12 :9831 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_051727-xsim9azn/files/output.log
46
+ 2024-05-23 05:17:32,783 DEBUG HandlerThread:9831 [handler.py:handle_request():158] handle_request: status_report
47
+ 2024-05-23 05:17:34,019 DEBUG SenderThread:9831 [sender.py:send():378] send: exit
48
+ 2024-05-23 05:17:34,019 INFO SenderThread:9831 [sender.py:send_exit():585] handling exit code: 1
49
+ 2024-05-23 05:17:34,019 INFO SenderThread:9831 [sender.py:send_exit():587] handling runtime: 5
50
+ 2024-05-23 05:17:34,021 INFO SenderThread:9831 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end
51
+ 2024-05-23 05:17:34,021 INFO SenderThread:9831 [sender.py:send_exit():593] send defer
52
+ 2024-05-23 05:17:34,021 DEBUG HandlerThread:9831 [handler.py:handle_request():158] handle_request: defer
53
+ 2024-05-23 05:17:34,021 INFO HandlerThread:9831 [handler.py:handle_request_defer():184] handle defer: 0
54
+ 2024-05-23 05:17:34,021 DEBUG SenderThread:9831 [sender.py:send_request():405] send_request: defer
55
+ 2024-05-23 05:17:34,021 INFO SenderThread:9831 [sender.py:send_request_defer():609] handle sender defer: 0
56
+ 2024-05-23 05:17:34,021 INFO SenderThread:9831 [sender.py:transition_state():613] send defer: 1
57
+ 2024-05-23 05:17:34,021 DEBUG HandlerThread:9831 [handler.py:handle_request():158] handle_request: defer
58
+ 2024-05-23 05:17:34,021 INFO HandlerThread:9831 [handler.py:handle_request_defer():184] handle defer: 1
59
+ 2024-05-23 05:17:34,021 DEBUG SenderThread:9831 [sender.py:send_request():405] send_request: defer
60
+ 2024-05-23 05:17:34,021 INFO SenderThread:9831 [sender.py:send_request_defer():609] handle sender defer: 1
61
+ 2024-05-23 05:17:34,021 INFO SenderThread:9831 [sender.py:transition_state():613] send defer: 2
62
+ 2024-05-23 05:17:34,022 DEBUG HandlerThread:9831 [handler.py:handle_request():158] handle_request: defer
63
+ 2024-05-23 05:17:34,022 INFO HandlerThread:9831 [handler.py:handle_request_defer():184] handle defer: 2
64
+ 2024-05-23 05:17:34,022 INFO HandlerThread:9831 [system_monitor.py:finish():203] Stopping system monitor
65
+ 2024-05-23 05:17:34,022 DEBUG SystemMonitor:9831 [system_monitor.py:_start():172] Starting system metrics aggregation loop
66
+ 2024-05-23 05:17:34,022 INFO HandlerThread:9831 [interfaces.py:finish():200] Joined cpu monitor
67
+ 2024-05-23 05:17:34,022 DEBUG SystemMonitor:9831 [system_monitor.py:_start():179] Finished system metrics aggregation loop
68
+ 2024-05-23 05:17:34,022 INFO HandlerThread:9831 [interfaces.py:finish():200] Joined disk monitor
69
+ 2024-05-23 05:17:34,022 DEBUG SystemMonitor:9831 [system_monitor.py:_start():183] Publishing last batch of metrics
70
+ 2024-05-23 05:17:34,022 INFO HandlerThread:9831 [interfaces.py:finish():200] Joined memory monitor
71
+ 2024-05-23 05:17:34,024 INFO HandlerThread:9831 [interfaces.py:finish():200] Joined network monitor
72
+ 2024-05-23 05:17:34,024 DEBUG SenderThread:9831 [sender.py:send_request():405] send_request: defer
73
+ 2024-05-23 05:17:34,024 INFO SenderThread:9831 [sender.py:send_request_defer():609] handle sender defer: 2
74
+ 2024-05-23 05:17:34,024 INFO SenderThread:9831 [sender.py:transition_state():613] send defer: 3
75
+ 2024-05-23 05:17:34,024 DEBUG HandlerThread:9831 [handler.py:handle_request():158] handle_request: defer
76
+ 2024-05-23 05:17:34,024 INFO HandlerThread:9831 [handler.py:handle_request_defer():184] handle defer: 3
77
+ 2024-05-23 05:17:34,025 DEBUG SenderThread:9831 [sender.py:send():378] send: stats
78
+ 2024-05-23 05:17:34,025 DEBUG SenderThread:9831 [sender.py:send_request():405] send_request: defer
79
+ 2024-05-23 05:17:34,025 INFO SenderThread:9831 [sender.py:send_request_defer():609] handle sender defer: 3
80
+ 2024-05-23 05:17:34,025 INFO SenderThread:9831 [sender.py:transition_state():613] send defer: 4
81
+ 2024-05-23 05:17:34,025 DEBUG HandlerThread:9831 [handler.py:handle_request():158] handle_request: defer
82
+ 2024-05-23 05:17:34,025 INFO HandlerThread:9831 [handler.py:handle_request_defer():184] handle defer: 4
83
+ 2024-05-23 05:17:34,025 DEBUG SenderThread:9831 [sender.py:send_request():405] send_request: defer
84
+ 2024-05-23 05:17:34,025 INFO SenderThread:9831 [sender.py:send_request_defer():609] handle sender defer: 4
85
+ 2024-05-23 05:17:34,026 INFO SenderThread:9831 [sender.py:transition_state():613] send defer: 5
86
+ 2024-05-23 05:17:34,026 DEBUG HandlerThread:9831 [handler.py:handle_request():158] handle_request: defer
87
+ 2024-05-23 05:17:34,026 INFO HandlerThread:9831 [handler.py:handle_request_defer():184] handle defer: 5
88
+ 2024-05-23 05:17:34,026 DEBUG SenderThread:9831 [sender.py:send():378] send: summary
89
+ 2024-05-23 05:17:34,026 INFO SenderThread:9831 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end
90
+ 2024-05-23 05:17:34,027 DEBUG SenderThread:9831 [sender.py:send_request():405] send_request: defer
91
+ 2024-05-23 05:17:34,027 INFO SenderThread:9831 [sender.py:send_request_defer():609] handle sender defer: 5
92
+ 2024-05-23 05:17:34,027 INFO SenderThread:9831 [sender.py:transition_state():613] send defer: 6
93
+ 2024-05-23 05:17:34,027 DEBUG HandlerThread:9831 [handler.py:handle_request():158] handle_request: defer
94
+ 2024-05-23 05:17:34,027 INFO HandlerThread:9831 [handler.py:handle_request_defer():184] handle defer: 6
95
+ 2024-05-23 05:17:34,027 DEBUG SenderThread:9831 [sender.py:send_request():405] send_request: defer
96
+ 2024-05-23 05:17:34,027 INFO SenderThread:9831 [sender.py:send_request_defer():609] handle sender defer: 6
97
+ 2024-05-23 05:17:34,029 DEBUG HandlerThread:9831 [handler.py:handle_request():158] handle_request: status_report
98
+ 2024-05-23 05:17:34,096 INFO SenderThread:9831 [sender.py:transition_state():613] send defer: 7
99
+ 2024-05-23 05:17:34,096 DEBUG HandlerThread:9831 [handler.py:handle_request():158] handle_request: defer
100
+ 2024-05-23 05:17:34,096 INFO HandlerThread:9831 [handler.py:handle_request_defer():184] handle defer: 7
101
+ 2024-05-23 05:17:34,097 DEBUG SenderThread:9831 [sender.py:send_request():405] send_request: defer
102
+ 2024-05-23 05:17:34,097 INFO SenderThread:9831 [sender.py:send_request_defer():609] handle sender defer: 7
103
+ 2024-05-23 05:17:34,975 INFO Thread-12 :9831 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_051727-xsim9azn/files/output.log
104
+ 2024-05-23 05:17:34,975 INFO Thread-12 :9831 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_051727-xsim9azn/files/config.yaml
105
+ 2024-05-23 05:17:34,975 INFO Thread-12 :9831 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_051727-xsim9azn/files/wandb-summary.json
106
+ 2024-05-23 05:17:35,019 DEBUG HandlerThread:9831 [handler.py:handle_request():158] handle_request: poll_exit
107
+ 2024-05-23 05:17:36,397 INFO SenderThread:9831 [sender.py:transition_state():613] send defer: 8
108
+ 2024-05-23 05:17:36,397 DEBUG SenderThread:9831 [sender.py:send_request():405] send_request: poll_exit
109
+ 2024-05-23 05:17:36,397 DEBUG HandlerThread:9831 [handler.py:handle_request():158] handle_request: defer
110
+ 2024-05-23 05:17:36,397 INFO HandlerThread:9831 [handler.py:handle_request_defer():184] handle defer: 8
111
+ 2024-05-23 05:17:36,398 DEBUG SenderThread:9831 [sender.py:send_request():405] send_request: defer
112
+ 2024-05-23 05:17:36,398 INFO SenderThread:9831 [sender.py:send_request_defer():609] handle sender defer: 8
113
+ 2024-05-23 05:17:36,398 INFO SenderThread:9831 [job_builder.py:build():432] Attempting to build job artifact
114
+ 2024-05-23 05:17:36,398 INFO SenderThread:9831 [job_builder.py:_get_source_type():576] no source found
115
+ 2024-05-23 05:17:36,398 INFO SenderThread:9831 [sender.py:transition_state():613] send defer: 9
116
+ 2024-05-23 05:17:36,398 DEBUG HandlerThread:9831 [handler.py:handle_request():158] handle_request: defer
117
+ 2024-05-23 05:17:36,398 INFO HandlerThread:9831 [handler.py:handle_request_defer():184] handle defer: 9
118
+ 2024-05-23 05:17:36,399 DEBUG SenderThread:9831 [sender.py:send_request():405] send_request: defer
119
+ 2024-05-23 05:17:36,399 INFO SenderThread:9831 [sender.py:send_request_defer():609] handle sender defer: 9
120
+ 2024-05-23 05:17:36,399 INFO SenderThread:9831 [dir_watcher.py:finish():358] shutting down directory watcher
121
+ 2024-05-23 05:17:36,977 INFO SenderThread:9831 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_051727-xsim9azn/files/output.log
122
+ 2024-05-23 05:17:36,977 INFO SenderThread:9831 [dir_watcher.py:finish():388] scan: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_051727-xsim9azn/files
123
+ 2024-05-23 05:17:36,977 INFO SenderThread:9831 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_051727-xsim9azn/files/wandb-summary.json wandb-summary.json
124
+ 2024-05-23 05:17:36,978 INFO SenderThread:9831 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_051727-xsim9azn/files/requirements.txt requirements.txt
125
+ 2024-05-23 05:17:36,978 INFO SenderThread:9831 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_051727-xsim9azn/files/wandb-metadata.json wandb-metadata.json
126
+ 2024-05-23 05:17:36,978 INFO SenderThread:9831 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_051727-xsim9azn/files/config.yaml config.yaml
127
+ 2024-05-23 05:17:36,978 INFO SenderThread:9831 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_051727-xsim9azn/files/output.log output.log
128
+ 2024-05-23 05:17:36,978 INFO SenderThread:9831 [sender.py:transition_state():613] send defer: 10
129
+ 2024-05-23 05:17:36,978 DEBUG HandlerThread:9831 [handler.py:handle_request():158] handle_request: defer
130
+ 2024-05-23 05:17:36,981 INFO HandlerThread:9831 [handler.py:handle_request_defer():184] handle defer: 10
131
+ 2024-05-23 05:17:36,981 DEBUG SenderThread:9831 [sender.py:send_request():405] send_request: defer
132
+ 2024-05-23 05:17:36,981 INFO SenderThread:9831 [sender.py:send_request_defer():609] handle sender defer: 10
133
+ 2024-05-23 05:17:36,982 INFO SenderThread:9831 [file_pusher.py:finish():169] shutting down file pusher
134
+ 2024-05-23 05:17:37,019 DEBUG HandlerThread:9831 [handler.py:handle_request():158] handle_request: poll_exit
135
+ 2024-05-23 05:17:37,020 DEBUG SenderThread:9831 [sender.py:send_request():405] send_request: poll_exit
136
+ 2024-05-23 05:17:37,239 INFO wandb-upload_1:9831 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_051727-xsim9azn/files/requirements.txt
137
+ 2024-05-23 05:17:37,382 INFO wandb-upload_0:9831 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_051727-xsim9azn/files/wandb-summary.json
138
+ 2024-05-23 05:17:37,455 INFO wandb-upload_2:9831 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_051727-xsim9azn/files/config.yaml
139
+ 2024-05-23 05:17:37,512 INFO wandb-upload_3:9831 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_051727-xsim9azn/files/output.log
140
+ 2024-05-23 05:17:37,712 INFO Thread-11 (_thread_body):9831 [sender.py:transition_state():613] send defer: 11
141
+ 2024-05-23 05:17:37,713 DEBUG HandlerThread:9831 [handler.py:handle_request():158] handle_request: defer
142
+ 2024-05-23 05:17:37,713 INFO HandlerThread:9831 [handler.py:handle_request_defer():184] handle defer: 11
143
+ 2024-05-23 05:17:37,713 DEBUG SenderThread:9831 [sender.py:send_request():405] send_request: defer
144
+ 2024-05-23 05:17:37,713 INFO SenderThread:9831 [sender.py:send_request_defer():609] handle sender defer: 11
145
+ 2024-05-23 05:17:37,713 INFO SenderThread:9831 [file_pusher.py:join():175] waiting for file pusher
146
+ 2024-05-23 05:17:37,713 INFO SenderThread:9831 [sender.py:transition_state():613] send defer: 12
147
+ 2024-05-23 05:17:37,714 DEBUG HandlerThread:9831 [handler.py:handle_request():158] handle_request: defer
148
+ 2024-05-23 05:17:37,714 INFO HandlerThread:9831 [handler.py:handle_request_defer():184] handle defer: 12
149
+ 2024-05-23 05:17:37,714 DEBUG SenderThread:9831 [sender.py:send_request():405] send_request: defer
150
+ 2024-05-23 05:17:37,714 INFO SenderThread:9831 [sender.py:send_request_defer():609] handle sender defer: 12
151
+ 2024-05-23 05:17:37,714 INFO SenderThread:9831 [file_stream.py:finish():601] file stream finish called
152
+ 2024-05-23 05:17:37,926 INFO SenderThread:9831 [file_stream.py:finish():605] file stream finish is done
153
+ 2024-05-23 05:17:37,926 INFO SenderThread:9831 [sender.py:transition_state():613] send defer: 13
154
+ 2024-05-23 05:17:37,926 DEBUG HandlerThread:9831 [handler.py:handle_request():158] handle_request: defer
155
+ 2024-05-23 05:17:37,926 INFO HandlerThread:9831 [handler.py:handle_request_defer():184] handle defer: 13
156
+ 2024-05-23 05:17:37,927 DEBUG SenderThread:9831 [sender.py:send_request():405] send_request: defer
157
+ 2024-05-23 05:17:37,927 INFO SenderThread:9831 [sender.py:send_request_defer():609] handle sender defer: 13
158
+ 2024-05-23 05:17:37,927 INFO SenderThread:9831 [sender.py:transition_state():613] send defer: 14
159
+ 2024-05-23 05:17:37,927 DEBUG HandlerThread:9831 [handler.py:handle_request():158] handle_request: defer
160
+ 2024-05-23 05:17:37,927 INFO HandlerThread:9831 [handler.py:handle_request_defer():184] handle defer: 14
161
+ 2024-05-23 05:17:37,927 DEBUG SenderThread:9831 [sender.py:send():378] send: final
162
+ 2024-05-23 05:17:37,927 DEBUG SenderThread:9831 [sender.py:send():378] send: footer
163
+ 2024-05-23 05:17:37,927 DEBUG SenderThread:9831 [sender.py:send_request():405] send_request: defer
164
+ 2024-05-23 05:17:37,927 INFO SenderThread:9831 [sender.py:send_request_defer():609] handle sender defer: 14
165
+ 2024-05-23 05:17:37,928 DEBUG HandlerThread:9831 [handler.py:handle_request():158] handle_request: poll_exit
166
+ 2024-05-23 05:17:37,928 DEBUG SenderThread:9831 [sender.py:send_request():405] send_request: poll_exit
167
+ 2024-05-23 05:17:37,928 DEBUG HandlerThread:9831 [handler.py:handle_request():158] handle_request: poll_exit
168
+ 2024-05-23 05:17:37,928 DEBUG HandlerThread:9831 [handler.py:handle_request():158] handle_request: server_info
169
+ 2024-05-23 05:17:37,929 DEBUG SenderThread:9831 [sender.py:send_request():405] send_request: poll_exit
170
+ 2024-05-23 05:17:37,929 DEBUG HandlerThread:9831 [handler.py:handle_request():158] handle_request: get_summary
171
+ 2024-05-23 05:17:37,929 DEBUG SenderThread:9831 [sender.py:send_request():405] send_request: server_info
172
+ 2024-05-23 05:17:37,929 DEBUG HandlerThread:9831 [handler.py:handle_request():158] handle_request: sampled_history
173
+ 2024-05-23 05:17:37,930 DEBUG HandlerThread:9831 [handler.py:handle_request():158] handle_request: internal_messages
174
+ 2024-05-23 05:17:37,983 INFO MainThread:9831 [wandb_run.py:_footer_history_summary_info():3994] rendering history
175
+ 2024-05-23 05:17:37,983 INFO MainThread:9831 [wandb_run.py:_footer_history_summary_info():4026] rendering summary
176
+ 2024-05-23 05:17:37,983 INFO MainThread:9831 [wandb_run.py:_footer_sync_info():3953] logging synced files
177
+ 2024-05-23 05:17:37,983 DEBUG HandlerThread:9831 [handler.py:handle_request():158] handle_request: shutdown
178
+ 2024-05-23 05:17:37,983 INFO HandlerThread:9831 [handler.py:finish():882] shutting down handler
179
+ 2024-05-23 05:17:38,929 INFO WriterThread:9831 [datastore.py:close():296] close: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_051727-xsim9azn/run-xsim9azn.wandb
180
+ 2024-05-23 05:17:38,983 INFO SenderThread:9831 [sender.py:finish():1545] shutting down sender
181
+ 2024-05-23 05:17:38,983 INFO SenderThread:9831 [file_pusher.py:finish():169] shutting down file pusher
182
+ 2024-05-23 05:17:38,983 INFO SenderThread:9831 [file_pusher.py:join():175] waiting for file pusher
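Editor's note: the tail above walks wandb's shutdown through numbered defer states, 0 up to the final state 14. For each state the sender announces `send defer: N`, the handler acknowledges (`handle defer: N`), and the sender confirms (`handle sender defer: N`) before transitioning to N+1; individual states gate concrete cleanup steps (2 stops the system monitor, 5 saves wandb-summary.json, 9 shuts down the directory watcher, 10 the file pusher, 12 finishes the file stream, 14 sends the footer). A minimal sketch of that handshake pattern, with purely illustrative step names rather than wandb's internals:

# Illustrative sketch of the staged shutdown visible in the log above:
# each numbered defer state gates one cleanup action, and states advance
# strictly one at a time. Step names are hypothetical, not wandb's code.
CLEANUP_STEPS = {
    2: "stop system monitor",
    5: "save wandb-summary.json",
    9: "shut down directory watcher",
    10: "shut down file pusher",
    12: "finish file stream",
    14: "send footer",
}

def run_shutdown(final_state: int = 14) -> None:
    for state in range(final_state + 1):
        print(f"send defer: {state}")                      # sender announces the state
        print(f"handle defer: {state}")                    # handler acknowledges it
        action = CLEANUP_STEPS.get(state, "no-op")
        print(f"handle sender defer: {state} ({action})")  # sender performs the step

run_shutdown()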
lm-evaluation-harness/wandb/run-20240523_051727-xsim9azn/logs/debug.log ADDED
@@ -0,0 +1,29 @@
1
+ 2024-05-23 05:17:27,730 INFO MainThread:9676 [wandb_setup.py:_flush():76] Current SDK version is 0.17.0
2
+ 2024-05-23 05:17:27,730 INFO MainThread:9676 [wandb_setup.py:_flush():76] Configure stats pid to 9676
3
+ 2024-05-23 05:17:27,730 INFO MainThread:9676 [wandb_setup.py:_flush():76] Loading settings from /root/.config/wandb/settings
4
+ 2024-05-23 05:17:27,730 INFO MainThread:9676 [wandb_setup.py:_flush():76] Loading settings from /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/settings
5
+ 2024-05-23 05:17:27,730 INFO MainThread:9676 [wandb_setup.py:_flush():76] Loading settings from environment variables: {}
6
+ 2024-05-23 05:17:27,730 INFO MainThread:9676 [wandb_setup.py:_flush():76] Applying setup settings: {'_disable_service': False}
7
+ 2024-05-23 05:17:27,730 WARNING MainThread:9676 [wandb_setup.py:_flush():76] Could not find program at -m lm_eval.__main__
8
+ 2024-05-23 05:17:27,730 INFO MainThread:9676 [wandb_setup.py:_flush():76] Inferring run settings from compute environment: {'program_relpath': None, 'program': '-m lm_eval.__main__'}
9
+ 2024-05-23 05:17:27,730 INFO MainThread:9676 [wandb_setup.py:_flush():76] Applying login settings: {}
10
+ 2024-05-23 05:17:27,730 INFO MainThread:9676 [wandb_init.py:_log_setup():520] Logging user logs to /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_051727-xsim9azn/logs/debug.log
11
+ 2024-05-23 05:17:27,730 INFO MainThread:9676 [wandb_init.py:_log_setup():521] Logging internal logs to /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_051727-xsim9azn/logs/debug-internal.log
12
+ 2024-05-23 05:17:27,730 INFO MainThread:9676 [wandb_init.py:init():560] calling init triggers
13
+ 2024-05-23 05:17:27,730 INFO MainThread:9676 [wandb_init.py:init():567] wandb.init called with sweep_config: {}
14
+ config: {}
15
+ 2024-05-23 05:17:27,730 INFO MainThread:9676 [wandb_init.py:init():610] starting backend
16
+ 2024-05-23 05:17:27,730 INFO MainThread:9676 [wandb_init.py:init():614] setting up manager
17
+ 2024-05-23 05:17:27,732 INFO MainThread:9676 [backend.py:_multiprocessing_setup():105] multiprocessing start_methods=fork,spawn,forkserver, using: spawn
18
+ 2024-05-23 05:17:27,732 INFO MainThread:9676 [wandb_init.py:init():622] backend started and connected
19
+ 2024-05-23 05:17:27,735 INFO MainThread:9676 [wandb_init.py:init():711] updated telemetry
20
+ 2024-05-23 05:17:27,746 INFO MainThread:9676 [wandb_init.py:init():744] communicating run to backend with 90.0 second timeout
21
+ 2024-05-23 05:17:27,974 INFO MainThread:9676 [wandb_run.py:_on_init():2396] communicating current version
22
+ 2024-05-23 05:17:28,056 INFO MainThread:9676 [wandb_run.py:_on_init():2405] got version response
23
+ 2024-05-23 05:17:28,056 INFO MainThread:9676 [wandb_init.py:init():795] starting run threads in backend
24
+ 2024-05-23 05:17:28,261 INFO MainThread:9676 [wandb_run.py:_console_start():2374] atexit reg
25
+ 2024-05-23 05:17:28,261 INFO MainThread:9676 [wandb_run.py:_redirect():2229] redirect: wrap_raw
26
+ 2024-05-23 05:17:28,261 INFO MainThread:9676 [wandb_run.py:_redirect():2294] Wrapping output streams.
27
+ 2024-05-23 05:17:28,261 INFO MainThread:9676 [wandb_run.py:_redirect():2319] Redirects installed.
28
+ 2024-05-23 05:17:28,262 INFO MainThread:9676 [wandb_init.py:init():838] run started, returning control to user process
29
+ 2024-05-23 05:17:38,985 WARNING MsgRouterThr:9676 [router.py:message_loop():77] message_loop has been closed
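Editor's note: this debug.log records a single wandb.init call from the harness process (settings load, backend spawn, run registration, stream redirection) and the matching teardown warning at exit. A minimal sketch of the client-side call that produces such a sequence, assuming the --wandb_args used by these runs (project=bharatgpt, group=trial_expt_2):

# Minimal sketch of the user-facing call behind the init sequence above;
# project and group are taken from the --wandb_args recorded in the run
# metadata, and are assumptions for this particular run.
import wandb

run = wandb.init(project="bharatgpt", group="trial_expt_2")
# ... evaluation happens here; the run directory and debug logs are created
# under wandb/run-<timestamp>-<id>/ as in the paths above ...
run.finish()  # triggers the defer/teardown sequence seen in debug-internal.log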
lm-evaluation-harness/wandb/run-20240523_051727-xsim9azn/run-xsim9azn.wandb ADDED
Binary file (11.2 kB).
lm-evaluation-harness/wandb/run-20240523_131005-q495m75j/files/config.yaml ADDED
@@ -0,0 +1,43 @@
1
+ wandb_version: 1
2
+
3
+ _wandb:
4
+ desc: null
5
+ value:
6
+ python_version: 3.10.12
7
+ cli_version: 0.17.0
8
+ framework: huggingface
9
+ huggingface_version: 4.41.1
10
+ is_jupyter_run: false
11
+ is_kaggle_kernel: false
12
+ start_time: 1716469805
13
+ t:
14
+ 1:
15
+ - 1
16
+ - 5
17
+ - 11
18
+ - 49
19
+ - 51
20
+ - 53
21
+ - 55
22
+ - 71
23
+ - 98
24
+ - 100
25
+ 2:
26
+ - 1
27
+ - 5
28
+ - 11
29
+ - 49
30
+ - 51
31
+ - 53
32
+ - 55
33
+ - 71
34
+ - 98
35
+ - 100
36
+ 3:
37
+ - 23
38
+ 4: 3.10.12
39
+ 5: 0.17.0
40
+ 6: 4.41.1
41
+ 8:
42
+ - 5
43
+ 13: linux-x86_64
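Editor's note: this config.yaml carries only the `_wandb` bookkeeping block (Python, CLI, and transformers versions plus telemetry feature codes). A quick sketch for reading those fields back, assuming PyYAML is available (it is pinned in the accompanying requirements.txt) and using the path shown in this diff:

# Minimal sketch: read the versions recorded in the _wandb block above.
import yaml

path = "lm-evaluation-harness/wandb/run-20240523_131005-q495m75j/files/config.yaml"
with open(path) as f:
    cfg = yaml.safe_load(f)

meta = cfg["_wandb"]["value"]  # the block holds desc/value, per the YAML above
print(meta["python_version"], meta["cli_version"], meta["huggingface_version"])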
lm-evaluation-harness/wandb/run-20240523_131005-q495m75j/files/output.log ADDED
@@ -0,0 +1,34 @@
1
+
2
+ 2024-05-23:13:10:06,122 INFO [__main__.py:251] Verbosity set to INFO
3
+ 2024-05-23:13:10:14,511 INFO [__main__.py:335] Selected Tasks: ['arc_easy', 'hellaswag', 'mrpc', 'openbookqa', 'sst2', 'winogrande']
4
+ 2024-05-23:13:10:14,513 INFO [evaluator.py:131] Setting random seed to 0 | Setting numpy seed to 1234 | Setting torch manual seed to 1234
5
+ 2024-05-23:13:10:14,513 INFO [evaluator.py:177] Initializing hf model, with arguments: {'pretrained': '/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step28000'}
6
+ 2024-05-23:13:10:16,939 INFO [huggingface.py:164] Using device 'cuda'
7
+ Traceback (most recent call last):
8
+ File "/usr/lib/python3.10/runpy.py", line 196, in _run_module_as_main
9
+ return _run_code(code, main_globals, None,
10
+ File "/usr/lib/python3.10/runpy.py", line 86, in _run_code
11
+ exec(code, run_globals)
12
+ File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 417, in <module>
13
+ cli_evaluate()
14
+ File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 341, in cli_evaluate
15
+ results = evaluator.simple_evaluate(
16
+ File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/utils.py", line 288, in _wrapper
17
+ return fn(*args, **kwargs)
18
+ File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/evaluator.py", line 180, in simple_evaluate
19
+ lm = lm_eval.api.registry.get_model(model).create_from_arg_string(
20
+ File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/api/model.py", line 134, in create_from_arg_string
21
+ return cls(**args, **args2)
22
+ File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 190, in __init__
23
+ self._get_config(
24
+ File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 471, in _get_config
25
+ self._config = transformers.AutoConfig.from_pretrained(
26
+ File "/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py", line 934, in from_pretrained
27
+ config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs)
28
+ File "/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 632, in get_config_dict
29
+ config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs)
30
+ File "/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 689, in _get_config_dict
31
+ resolved_config_file = cached_file(
32
+ File "/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py", line 370, in cached_file
33
+ raise EnvironmentError(
34
+ OSError: /mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step28000 does not appear to have a file named config.json. Checkout 'https://huggingface.co//mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step28000/tree/main' for available files.
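Editor's note: the traceback above is the run's only failure: transformers.AutoConfig.from_pretrained rejects the checkpoint directory because no config.json was exported alongside the weights. A minimal pre-flight check, assuming the same checkpoint path (double slashes preserved from the logs):

# Minimal sketch: fail fast before lm-eval starts if the HF checkpoint
# directory lacks config.json, which is exactly what the OSError reports.
import os
from transformers import AutoConfig

ckpt = "/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step28000"
if not os.path.isfile(os.path.join(ckpt, "config.json")):
    raise SystemExit(f"{ckpt} has no config.json; export the HF config before evaluating.")
config = AutoConfig.from_pretrained(ckpt)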
lm-evaluation-harness/wandb/run-20240523_131005-q495m75j/files/requirements.txt ADDED
@@ -0,0 +1,155 @@
1
+ DataProperty==1.0.1
2
+ GitPython==3.1.43
3
+ Jinja2==3.1.4
4
+ Markdown==3.6
5
+ MarkupSafe==2.1.5
6
+ Pillow-SIMD==7.0.0.post3
7
+ PyYAML==6.0
8
+ Werkzeug==3.0.3
9
+ absl-py==2.1.0
10
+ accelerate==0.30.1
11
+ aiohttp==3.9.5
12
+ aiosignal==1.3.1
13
+ async-timeout==4.0.3
14
+ attrs==23.2.0
15
+ av==9.2.0
16
+ cachetools==5.3.3
17
+ certifi==2024.2.2
18
+ cffi==1.15.1
19
+ cfgv==3.4.0
20
+ chardet==5.2.0
21
+ charset-normalizer==3.3.2
22
+ click==8.1.7
23
+ cmake==3.29.2
24
+ colorama==0.4.6
25
+ datasets==2.19.1
26
+ deepspeed==0.12.4+hpu.synapse.v1.15.1
27
+ dill==0.3.8
28
+ distlib==0.3.8
29
+ docker-pycreds==0.4.0
30
+ einops==0.8.0
31
+ evaluate==0.4.2
32
+ exceptiongroup==1.2.1
33
+ expecttest==0.2.1
34
+ filelock==3.14.0
35
+ frozenlist==1.4.1
36
+ fsspec==2024.3.1
37
+ gitdb==4.0.11
38
+ google-auth-oauthlib==0.4.6
39
+ google-auth==2.29.0
40
+ grpcio==1.63.0
41
+ habana-media-loader==1.15.1.15
42
+ habana-pyhlml==1.15.1.15
43
+ habana-torch-dataloader==1.15.1.15
44
+ habana-torch-plugin==1.15.1.15
45
+ habana_gpu_migration==1.15.1.15
46
+ habana_quantization_toolkit==1.15.1.15
47
+ hjson==3.1.0
48
+ huggingface-hub==0.23.1
49
+ identify==2.5.36
50
+ idna==3.7
51
+ iniconfig==2.0.0
52
+ joblib==1.4.2
53
+ jsonlines==4.0.0
54
+ lightning-habana==1.4.0
55
+ lightning-utilities==0.11.2
56
+ lightning==2.2.0.post0
57
+ lm_eval==0.4.2
58
+ lm_eval==0.4.2
59
+ lm_eval==0.4.2
60
+ lxml==5.2.2
61
+ mbstrdecoder==1.1.3
62
+ more-itertools==10.2.0
63
+ mpi4py==3.1.4
64
+ mpmath==1.3.0
65
+ multidict==6.0.5
66
+ multiprocess==0.70.16
67
+ networkx==3.3
68
+ ninja==1.11.1.1
69
+ nltk==3.8.1
70
+ nodeenv==1.8.0
71
+ numexpr==2.10.0
72
+ numpy==1.23.5
73
+ oauthlib==3.2.2
74
+ packaging==24.0
75
+ pandas==2.0.1
76
+ pathspec==0.12.1
77
+ pathvalidate==3.2.0
78
+ peft==0.11.1
79
+ perfetto==0.7.0
80
+ pillow==10.3.0
81
+ pip==22.0.2
82
+ pip==23.3.1
83
+ platformdirs==4.2.1
84
+ pluggy==1.5.0
85
+ portalocker==2.8.2
86
+ pre-commit==3.3.3
87
+ pretty-errors==1.2.25
88
+ protobuf==3.20.3
89
+ psutil==5.9.8
90
+ py-cpuinfo==9.0.0
91
+ pyarrow-hotfix==0.6
92
+ pyarrow==16.1.0
93
+ pyasn1==0.6.0
94
+ pyasn1_modules==0.4.0
95
+ pybind11==2.10.4
96
+ pycparser==2.22
97
+ pydantic==1.10.13
98
+ pynvml==8.0.4
99
+ pytablewriter==1.2.0
100
+ pytest==8.2.0
101
+ python-dateutil==2.9.0.post0
102
+ pytorch-lightning==2.2.4
103
+ pytz==2024.1
104
+ regex==2023.5.5
105
+ requests-oauthlib==2.0.0
106
+ requests==2.31.0
107
+ rouge_score==0.1.2
108
+ rsa==4.9
109
+ sacrebleu==2.4.2
110
+ safetensors==0.4.3
111
+ scikit-learn==1.5.0
112
+ scipy==1.13.1
113
+ sentencepiece==0.2.0
114
+ sentry-sdk==2.3.0
115
+ setproctitle==1.3.3
116
+ setuptools==59.6.0
117
+ setuptools==69.5.1
118
+ six==1.16.0
119
+ smmap==5.0.1
120
+ sqlitedict==2.1.0
121
+ symengine==0.11.0
122
+ sympy==1.12
123
+ tabledata==1.3.3
124
+ tabulate==0.9.0
125
+ tcolorpy==0.1.6
126
+ tdqm==0.0.1
127
+ tensorboard-data-server==0.6.1
128
+ tensorboard-plugin-wit==1.8.1
129
+ tensorboard==2.11.2
130
+ threadpoolctl==3.5.0
131
+ tokenizers==0.19.1
132
+ tomli==2.0.1
133
+ torch==2.2.0a0+git8964477
134
+ torch_tb_profiler==0.4.0
135
+ torchaudio==2.2.0+08901ad
136
+ torchdata==0.7.1+5e6f7b7
137
+ torchmetrics==1.4.0
138
+ torchtext==0.17.0+400da5c
139
+ torchvision==0.17.0+b2383d4
140
+ tqdm-multiprocess==0.0.11
141
+ tqdm==4.66.4
142
+ transformers==4.41.1
143
+ typepy==1.3.2
144
+ typing_extensions==4.11.0
145
+ tzdata==2024.1
146
+ urllib3==1.26.18
147
+ virtualenv==20.26.1
148
+ wandb==0.17.0
149
+ wheel==0.37.1
150
+ wheel==0.43.0
151
+ word2number==1.1
152
+ xxhash==3.4.1
153
+ yamllint==1.35.1
154
+ yarl==1.9.4
155
+ zstandard==0.22.0
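Editor's note: the pin list above is a raw environment dump, so a few distributions appear more than once (lm_eval three times; pip, setuptools, and wheel twice), reflecting multiple site-packages directories on the worker. A small sketch for spot-checking that the active interpreter matches the key pins (the chosen packages here are an arbitrary subset):

# Minimal sketch: verify a few of the pins captured above against the
# running environment, using the standard-library importlib.metadata.
from importlib.metadata import version, PackageNotFoundError

pins = {"transformers": "4.41.1", "wandb": "0.17.0", "lm_eval": "0.4.2"}
for name, want in pins.items():
    try:
        have = version(name)
        print(f"{name}: pinned {want}, installed {have}, match={have == want}")
    except PackageNotFoundError:
        print(f"{name}: not installed")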
lm-evaluation-harness/wandb/run-20240523_131005-q495m75j/files/wandb-metadata.json ADDED
@@ -0,0 +1,850 @@
1
+ {
2
+ "os": "Linux-5.15.0-92-generic-x86_64-with-glibc2.35",
3
+ "python": "3.10.12",
4
+ "heartbeatAt": "2024-05-23T13:10:05.921078",
5
+ "startedAt": "2024-05-23T13:10:05.381984",
6
+ "docker": null,
7
+ "cuda": null,
8
+ "args": [
9
+ "--model",
10
+ "hf",
11
+ "--model_args",
12
+ "pretrained=/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step28000",
13
+ "--tasks",
14
+ "hellaswag,arc_easy,openbookqa,winogrande,sst2,mrpc",
15
+ "--batch_size",
16
+ "auto",
17
+ "--wandb_args",
18
+ "project=bharatgpt,group=trial_expt_2"
19
+ ],
20
+ "state": "running",
21
+ "program": "-m lm_eval.__main__",
22
+ "codePathLocal": null,
23
+ "git": {
24
+ "remote": "https://github.com/EleutherAI/lm-evaluation-harness",
25
+ "commit": null
26
+ },
27
+ "email": null,
28
+ "root": "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness",
29
+ "host": "peacock-evaluation-worker-0",
30
+ "username": "root",
31
+ "executable": "/usr/bin/python3",
32
+ "cpu_count": 80,
33
+ "cpu_count_logical": 160,
34
+ "cpu_freq": {
35
+ "current": 2327.50000625,
36
+ "min": 800.0,
37
+ "max": 3400.0
38
+ },
39
+ "cpu_freq_per_core": [
40
+ {
41
+ "current": 3399.997,
42
+ "min": 800.0,
43
+ "max": 3400.0
44
+ },
45
+ {
46
+ "current": 3400.0,
47
+ "min": 800.0,
48
+ "max": 3400.0
49
+ },
50
+ {
51
+ "current": 2300.0,
52
+ "min": 800.0,
53
+ "max": 3400.0
54
+ },
55
+ {
56
+ "current": 2300.0,
57
+ "min": 800.0,
58
+ "max": 3400.0
59
+ },
60
+ {
61
+ "current": 2300.0,
62
+ "min": 800.0,
63
+ "max": 3400.0
64
+ },
65
+ {
66
+ "current": 2300.0,
67
+ "min": 800.0,
68
+ "max": 3400.0
69
+ },
70
+ {
71
+ "current": 2300.0,
72
+ "min": 800.0,
73
+ "max": 3400.0
74
+ },
75
+ {
76
+ "current": 2300.0,
77
+ "min": 800.0,
78
+ "max": 3400.0
79
+ },
80
+ {
81
+ "current": 2300.0,
82
+ "min": 800.0,
83
+ "max": 3400.0
84
+ },
85
+ {
86
+ "current": 2300.0,
87
+ "min": 800.0,
88
+ "max": 3400.0
89
+ },
90
+ {
91
+ "current": 2300.0,
92
+ "min": 800.0,
93
+ "max": 3400.0
94
+ },
95
+ {
96
+ "current": 2300.0,
97
+ "min": 800.0,
98
+ "max": 3400.0
99
+ },
100
+ {
101
+ "current": 2300.0,
102
+ "min": 800.0,
103
+ "max": 3400.0
104
+ },
105
+ {
106
+ "current": 2300.0,
107
+ "min": 800.0,
108
+ "max": 3400.0
109
+ },
110
+ {
111
+ "current": 2300.0,
112
+ "min": 800.0,
113
+ "max": 3400.0
114
+ },
115
+ {
116
+ "current": 2300.0,
117
+ "min": 800.0,
118
+ "max": 3400.0
119
+ },
120
+ {
121
+ "current": 2300.0,
122
+ "min": 800.0,
123
+ "max": 3400.0
124
+ },
125
+ {
126
+ "current": 2300.0,
127
+ "min": 800.0,
128
+ "max": 3400.0
129
+ },
130
+ {
131
+ "current": 2300.0,
132
+ "min": 800.0,
133
+ "max": 3400.0
134
+ },
135
+ {
136
+ "current": 2300.0,
137
+ "min": 800.0,
138
+ "max": 3400.0
139
+ },
140
+ {
141
+ "current": 2300.0,
142
+ "min": 800.0,
143
+ "max": 3400.0
144
+ },
145
+ {
146
+ "current": 2300.0,
147
+ "min": 800.0,
148
+ "max": 3400.0
149
+ },
150
+ {
151
+ "current": 2300.0,
152
+ "min": 800.0,
153
+ "max": 3400.0
154
+ },
155
+ {
156
+ "current": 2300.0,
157
+ "min": 800.0,
158
+ "max": 3400.0
159
+ },
160
+ {
161
+ "current": 2300.0,
162
+ "min": 800.0,
163
+ "max": 3400.0
164
+ },
165
+ {
166
+ "current": 2300.0,
167
+ "min": 800.0,
168
+ "max": 3400.0
169
+ },
170
+ {
171
+ "current": 2300.0,
172
+ "min": 800.0,
173
+ "max": 3400.0
174
+ },
175
+ {
176
+ "current": 2300.0,
177
+ "min": 800.0,
178
+ "max": 3400.0
179
+ },
180
+ {
181
+ "current": 2300.0,
182
+ "min": 800.0,
183
+ "max": 3400.0
184
+ },
185
+ {
186
+ "current": 2300.0,
187
+ "min": 800.0,
188
+ "max": 3400.0
189
+ },
190
+ {
191
+ "current": 2300.0,
192
+ "min": 800.0,
193
+ "max": 3400.0
194
+ },
195
+ {
196
+ "current": 2300.0,
197
+ "min": 800.0,
198
+ "max": 3400.0
199
+ },
200
+ {
201
+ "current": 2300.0,
202
+ "min": 800.0,
203
+ "max": 3400.0
204
+ },
205
+ {
206
+ "current": 2300.0,
207
+ "min": 800.0,
208
+ "max": 3400.0
209
+ },
210
+ {
211
+ "current": 2300.0,
212
+ "min": 800.0,
213
+ "max": 3400.0
214
+ },
215
+ {
216
+ "current": 2300.0,
217
+ "min": 800.0,
218
+ "max": 3400.0
219
+ },
220
+ {
221
+ "current": 2300.0,
222
+ "min": 800.0,
223
+ "max": 3400.0
224
+ },
225
+ {
226
+ "current": 2300.0,
227
+ "min": 800.0,
228
+ "max": 3400.0
229
+ },
230
+ {
231
+ "current": 2300.0,
232
+ "min": 800.0,
233
+ "max": 3400.0
234
+ },
235
+ {
236
+ "current": 2300.0,
237
+ "min": 800.0,
238
+ "max": 3400.0
239
+ },
240
+ {
241
+ "current": 3400.0,
242
+ "min": 800.0,
243
+ "max": 3400.0
244
+ },
245
+ {
246
+ "current": 2300.0,
247
+ "min": 800.0,
248
+ "max": 3400.0
249
+ },
250
+ {
251
+ "current": 2300.0,
252
+ "min": 800.0,
253
+ "max": 3400.0
254
+ },
255
+ {
256
+ "current": 2300.0,
257
+ "min": 800.0,
258
+ "max": 3400.0
259
+ },
260
+ {
261
+ "current": 2300.0,
262
+ "min": 800.0,
263
+ "max": 3400.0
264
+ },
265
+ {
266
+ "current": 2300.0,
267
+ "min": 800.0,
268
+ "max": 3400.0
269
+ },
270
+ {
271
+ "current": 2300.0,
272
+ "min": 800.0,
273
+ "max": 3400.0
274
+ },
275
+ {
276
+ "current": 2300.0,
277
+ "min": 800.0,
278
+ "max": 3400.0
279
+ },
280
+ {
281
+ "current": 2300.0,
282
+ "min": 800.0,
283
+ "max": 3400.0
284
+ },
285
+ {
286
+ "current": 2300.0,
287
+ "min": 800.0,
288
+ "max": 3400.0
289
+ },
290
+ {
291
+ "current": 2300.0,
292
+ "min": 800.0,
293
+ "max": 3400.0
294
+ },
295
+ {
296
+ "current": 2300.0,
297
+ "min": 800.0,
298
+ "max": 3400.0
299
+ },
300
+ {
301
+ "current": 2300.0,
302
+ "min": 800.0,
303
+ "max": 3400.0
304
+ },
305
+ {
306
+ "current": 2300.0,
307
+ "min": 800.0,
308
+ "max": 3400.0
309
+ },
310
+ {
311
+ "current": 2300.0,
312
+ "min": 800.0,
313
+ "max": 3400.0
314
+ },
315
+ {
316
+ "current": 2300.0,
317
+ "min": 800.0,
318
+ "max": 3400.0
319
+ },
320
+ {
321
+ "current": 2300.0,
322
+ "min": 800.0,
323
+ "max": 3400.0
324
+ },
325
+ {
326
+ "current": 2300.0,
327
+ "min": 800.0,
328
+ "max": 3400.0
329
+ },
330
+ {
331
+ "current": 2300.0,
332
+ "min": 800.0,
333
+ "max": 3400.0
334
+ },
335
+ {
336
+ "current": 2300.0,
337
+ "min": 800.0,
338
+ "max": 3400.0
339
+ },
340
+ {
341
+ "current": 2300.0,
342
+ "min": 800.0,
343
+ "max": 3400.0
344
+ },
345
+ {
346
+ "current": 2300.0,
347
+ "min": 800.0,
348
+ "max": 3400.0
349
+ },
350
+ {
351
+ "current": 2300.0,
352
+ "min": 800.0,
353
+ "max": 3400.0
354
+ },
355
+ {
356
+ "current": 2300.0,
357
+ "min": 800.0,
358
+ "max": 3400.0
359
+ },
360
+ {
361
+ "current": 2300.0,
362
+ "min": 800.0,
363
+ "max": 3400.0
364
+ },
365
+ {
366
+ "current": 2300.0,
367
+ "min": 800.0,
368
+ "max": 3400.0
369
+ },
370
+ {
371
+ "current": 2300.0,
372
+ "min": 800.0,
373
+ "max": 3400.0
374
+ },
375
+ {
376
+ "current": 2300.0,
377
+ "min": 800.0,
378
+ "max": 3400.0
379
+ },
380
+ {
381
+ "current": 2300.0,
382
+ "min": 800.0,
383
+ "max": 3400.0
384
+ },
385
+ {
386
+ "current": 2300.0,
387
+ "min": 800.0,
388
+ "max": 3400.0
389
+ },
390
+ {
391
+ "current": 2300.0,
392
+ "min": 800.0,
393
+ "max": 3400.0
394
+ },
395
+ {
396
+ "current": 2300.0,
397
+ "min": 800.0,
398
+ "max": 3400.0
399
+ },
400
+ {
401
+ "current": 2300.0,
402
+ "min": 800.0,
403
+ "max": 3400.0
404
+ },
405
+ {
406
+ "current": 2300.0,
407
+ "min": 800.0,
408
+ "max": 3400.0
409
+ },
410
+ {
411
+ "current": 2300.0,
412
+ "min": 800.0,
413
+ "max": 3400.0
414
+ },
415
+ {
416
+ "current": 2300.0,
417
+ "min": 800.0,
418
+ "max": 3400.0
419
+ },
420
+ {
421
+ "current": 2300.0,
422
+ "min": 800.0,
423
+ "max": 3400.0
424
+ },
425
+ {
426
+ "current": 2300.0,
427
+ "min": 800.0,
428
+ "max": 3400.0
429
+ },
430
+ {
431
+ "current": 2300.0,
432
+ "min": 800.0,
433
+ "max": 3400.0
434
+ },
435
+ {
436
+ "current": 2300.0,
437
+ "min": 800.0,
438
+ "max": 3400.0
439
+ },
440
+ {
441
+ "current": 3400.0,
442
+ "min": 800.0,
443
+ "max": 3400.0
444
+ },
445
+ {
446
+ "current": 2300.0,
447
+ "min": 800.0,
448
+ "max": 3400.0
449
+ },
450
+ {
451
+ "current": 2300.0,
452
+ "min": 800.0,
453
+ "max": 3400.0
454
+ },
455
+ {
456
+ "current": 2300.0,
457
+ "min": 800.0,
458
+ "max": 3400.0
459
+ },
460
+ {
461
+ "current": 2300.0,
462
+ "min": 800.0,
463
+ "max": 3400.0
464
+ },
465
+ {
466
+ "current": 2300.0,
467
+ "min": 800.0,
468
+ "max": 3400.0
469
+ },
470
+ {
471
+ "current": 2300.0,
472
+ "min": 800.0,
473
+ "max": 3400.0
474
+ },
475
+ {
476
+ "current": 2300.0,
477
+ "min": 800.0,
478
+ "max": 3400.0
479
+ },
480
+ {
481
+ "current": 2300.0,
482
+ "min": 800.0,
483
+ "max": 3400.0
484
+ },
485
+ {
486
+ "current": 2300.0,
487
+ "min": 800.0,
488
+ "max": 3400.0
489
+ },
490
+ {
491
+ "current": 2300.0,
492
+ "min": 800.0,
493
+ "max": 3400.0
494
+ },
495
+ {
496
+ "current": 2300.0,
497
+ "min": 800.0,
498
+ "max": 3400.0
499
+ },
500
+ {
501
+ "current": 2300.0,
502
+ "min": 800.0,
503
+ "max": 3400.0
504
+ },
505
+ {
506
+ "current": 2300.0,
507
+ "min": 800.0,
508
+ "max": 3400.0
509
+ },
510
+ {
511
+ "current": 2300.0,
512
+ "min": 800.0,
513
+ "max": 3400.0
514
+ },
515
+ {
516
+ "current": 2300.0,
517
+ "min": 800.0,
518
+ "max": 3400.0
519
+ },
520
+ {
521
+ "current": 2300.0,
522
+ "min": 800.0,
523
+ "max": 3400.0
524
+ },
525
+ {
526
+ "current": 2300.0,
527
+ "min": 800.0,
528
+ "max": 3400.0
529
+ },
530
+ {
531
+ "current": 2300.0,
532
+ "min": 800.0,
533
+ "max": 3400.0
534
+ },
535
+ {
536
+ "current": 2300.0,
537
+ "min": 800.0,
538
+ "max": 3400.0
539
+ },
540
+ {
541
+ "current": 2300.0,
542
+ "min": 800.0,
543
+ "max": 3400.0
544
+ },
545
+ {
546
+ "current": 2300.0,
547
+ "min": 800.0,
548
+ "max": 3400.0
549
+ },
550
+ {
551
+ "current": 2300.0,
552
+ "min": 800.0,
553
+ "max": 3400.0
554
+ },
555
+ {
556
+ "current": 2300.0,
557
+ "min": 800.0,
558
+ "max": 3400.0
559
+ },
560
+ {
561
+ "current": 2300.0,
562
+ "min": 800.0,
563
+ "max": 3400.0
564
+ },
565
+ {
566
+ "current": 2300.0,
567
+ "min": 800.0,
568
+ "max": 3400.0
569
+ },
570
+ {
571
+ "current": 2300.0,
572
+ "min": 800.0,
573
+ "max": 3400.0
574
+ },
575
+ {
576
+ "current": 2300.0,
577
+ "min": 800.0,
578
+ "max": 3400.0
579
+ },
580
+ {
581
+ "current": 2300.0,
582
+ "min": 800.0,
583
+ "max": 3400.0
584
+ },
585
+ {
586
+ "current": 2300.0,
587
+ "min": 800.0,
588
+ "max": 3400.0
589
+ },
590
+ {
591
+ "current": 2300.0,
592
+ "min": 800.0,
593
+ "max": 3400.0
594
+ },
595
+ {
596
+ "current": 2300.0,
597
+ "min": 800.0,
598
+ "max": 3400.0
599
+ },
600
+ {
601
+ "current": 2300.0,
602
+ "min": 800.0,
603
+ "max": 3400.0
604
+ },
605
+ {
606
+ "current": 2300.0,
607
+ "min": 800.0,
608
+ "max": 3400.0
609
+ },
610
+ {
611
+ "current": 2300.0,
612
+ "min": 800.0,
613
+ "max": 3400.0
614
+ },
615
+ {
616
+ "current": 2300.0,
617
+ "min": 800.0,
618
+ "max": 3400.0
619
+ },
620
+ {
621
+ "current": 2300.0,
622
+ "min": 800.0,
623
+ "max": 3400.0
624
+ },
625
+ {
626
+ "current": 2300.0,
627
+ "min": 800.0,
628
+ "max": 3400.0
629
+ },
630
+ {
631
+ "current": 2300.0,
632
+ "min": 800.0,
633
+ "max": 3400.0
634
+ },
635
+ {
636
+ "current": 2300.0,
637
+ "min": 800.0,
638
+ "max": 3400.0
639
+ },
640
+ {
641
+ "current": 2300.0,
642
+ "min": 800.0,
643
+ "max": 3400.0
644
+ },
645
+ {
646
+ "current": 2300.0,
647
+ "min": 800.0,
648
+ "max": 3400.0
649
+ },
650
+ {
651
+ "current": 2300.0,
652
+ "min": 800.0,
653
+ "max": 3400.0
654
+ },
655
+ {
656
+ "current": 2300.0,
657
+ "min": 800.0,
658
+ "max": 3400.0
659
+ },
660
+ {
661
+ "current": 2300.0,
662
+ "min": 800.0,
663
+ "max": 3400.0
664
+ },
665
+ {
666
+ "current": 2300.0,
667
+ "min": 800.0,
668
+ "max": 3400.0
669
+ },
670
+ {
671
+ "current": 2300.0,
672
+ "min": 800.0,
673
+ "max": 3400.0
674
+ },
675
+ {
676
+ "current": 2300.0,
677
+ "min": 800.0,
678
+ "max": 3400.0
679
+ },
680
+ {
681
+ "current": 2300.0,
682
+ "min": 800.0,
683
+ "max": 3400.0
684
+ },
685
+ {
686
+ "current": 2300.0,
687
+ "min": 800.0,
688
+ "max": 3400.0
689
+ },
690
+ {
691
+ "current": 2300.0,
692
+ "min": 800.0,
693
+ "max": 3400.0
694
+ },
695
+ {
696
+ "current": 2300.0,
697
+ "min": 800.0,
698
+ "max": 3400.0
699
+ },
700
+ {
701
+ "current": 2300.0,
702
+ "min": 800.0,
703
+ "max": 3400.0
704
+ },
705
+ {
706
+ "current": 2300.0,
707
+ "min": 800.0,
708
+ "max": 3400.0
709
+ },
710
+ {
711
+ "current": 2300.0,
712
+ "min": 800.0,
713
+ "max": 3400.0
714
+ },
715
+ {
716
+ "current": 2300.0,
717
+ "min": 800.0,
718
+ "max": 3400.0
719
+ },
720
+ {
721
+ "current": 2300.0,
722
+ "min": 800.0,
723
+ "max": 3400.0
724
+ },
725
+ {
726
+ "current": 2300.0,
727
+ "min": 800.0,
728
+ "max": 3400.0
729
+ },
730
+ {
731
+ "current": 2300.0,
732
+ "min": 800.0,
733
+ "max": 3400.0
734
+ },
735
+ {
736
+ "current": 2300.0,
737
+ "min": 800.0,
738
+ "max": 3400.0
739
+ },
740
+ {
741
+ "current": 2300.0,
742
+ "min": 800.0,
743
+ "max": 3400.0
744
+ },
745
+ {
746
+ "current": 2300.0,
747
+ "min": 800.0,
748
+ "max": 3400.0
749
+ },
750
+ {
751
+ "current": 2300.0,
752
+ "min": 800.0,
753
+ "max": 3400.0
754
+ },
755
+ {
756
+ "current": 2300.0,
757
+ "min": 800.0,
758
+ "max": 3400.0
759
+ },
760
+ {
761
+ "current": 2300.0,
762
+ "min": 800.0,
763
+ "max": 3400.0
764
+ },
765
+ {
766
+ "current": 2300.0,
767
+ "min": 800.0,
768
+ "max": 3400.0
769
+ },
770
+ {
771
+ "current": 2300.0,
772
+ "min": 800.0,
773
+ "max": 3400.0
774
+ },
775
+ {
776
+ "current": 2300.0,
777
+ "min": 800.0,
778
+ "max": 3400.0
779
+ },
780
+ {
781
+ "current": 2300.0,
782
+ "min": 800.0,
783
+ "max": 3400.0
784
+ },
785
+ {
786
+ "current": 2300.0,
787
+ "min": 800.0,
788
+ "max": 3400.0
789
+ },
790
+ {
791
+ "current": 2300.0,
792
+ "min": 800.0,
793
+ "max": 3400.0
794
+ },
795
+ {
796
+ "current": 2300.0,
797
+ "min": 800.0,
798
+ "max": 3400.0
799
+ },
800
+ {
801
+ "current": 2300.0,
802
+ "min": 800.0,
803
+ "max": 3400.0
804
+ },
805
+ {
806
+ "current": 2300.0,
807
+ "min": 800.0,
808
+ "max": 3400.0
809
+ },
810
+ {
811
+ "current": 2300.0,
812
+ "min": 800.0,
813
+ "max": 3400.0
814
+ },
815
+ {
816
+ "current": 2300.0,
817
+ "min": 800.0,
818
+ "max": 3400.0
819
+ },
820
+ {
821
+ "current": 2300.0,
822
+ "min": 800.0,
823
+ "max": 3400.0
824
+ },
825
+ {
826
+ "current": 2300.0,
827
+ "min": 800.0,
828
+ "max": 3400.0
829
+ },
830
+ {
831
+ "current": 2300.0,
832
+ "min": 800.0,
833
+ "max": 3400.0
834
+ },
835
+ {
836
+ "current": 2300.0,
837
+ "min": 800.0,
838
+ "max": 3400.0
839
+ }
840
+ ],
841
+ "disk": {
842
+ "/": {
843
+ "total": 877.6341285705566,
844
+ "used": 211.62947463989258
845
+ }
846
+ },
847
+ "memory": {
848
+ "total": 1007.4379539489746
849
+ }
850
+ }
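Editor's note: the metadata above is wandb's system probe of the evaluation worker (80 physical / 160 logical cores, per-core frequency ranges, disk and memory totals). The same fields can be gathered with psutil, which this environment pins as psutil==5.9.8; a minimal sketch:

# Minimal sketch: collect the probe fields shown above with psutil.
import psutil

print("cpu_count:", psutil.cpu_count(logical=False))         # 80 in the metadata
print("cpu_count_logical:", psutil.cpu_count(logical=True))  # 160 in the metadata
freqs = psutil.cpu_freq(percpu=True)  # one (current, min, max) entry per core
if freqs:
    print("core 0:", freqs[0].current, freqs[0].min, freqs[0].max)
print("disk /:", psutil.disk_usage("/").total)     # compare with disk["/"].total
print("memory:", psutil.virtual_memory().total)    # compare with memory.total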
lm-evaluation-harness/wandb/run-20240523_131005-q495m75j/files/wandb-summary.json ADDED
@@ -0,0 +1 @@
1
+ {"_wandb": {"runtime": 11}}
lm-evaluation-harness/wandb/run-20240523_131005-q495m75j/logs/debug-internal.log ADDED
@@ -0,0 +1,183 @@
1
+ 2024-05-23 13:10:05,402 INFO StreamThr :3633 [internal.py:wandb_internal():85] W&B internal server running at pid: 3633, started at: 2024-05-23 13:10:05.400548
2
+ 2024-05-23 13:10:05,407 DEBUG HandlerThread:3633 [handler.py:handle_request():158] handle_request: status
3
+ 2024-05-23 13:10:05,409 INFO WriterThread:3633 [datastore.py:open_for_write():87] open: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_131005-q495m75j/run-q495m75j.wandb
4
+ 2024-05-23 13:10:05,410 DEBUG SenderThread:3633 [sender.py:send():378] send: header
5
+ 2024-05-23 13:10:05,413 DEBUG SenderThread:3633 [sender.py:send():378] send: run
6
+ 2024-05-23 13:10:05,710 INFO SenderThread:3633 [dir_watcher.py:__init__():211] watching files in: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_131005-q495m75j/files
7
+ 2024-05-23 13:10:05,711 INFO SenderThread:3633 [sender.py:_start_run_threads():1123] run started: q495m75j with start time 1716469805.401001
8
+ 2024-05-23 13:10:05,719 DEBUG HandlerThread:3633 [handler.py:handle_request():158] handle_request: check_version
9
+ 2024-05-23 13:10:05,719 DEBUG SenderThread:3633 [sender.py:send_request():405] send_request: check_version
10
+ 2024-05-23 13:10:05,846 DEBUG HandlerThread:3633 [handler.py:handle_request():158] handle_request: run_start
11
+ 2024-05-23 13:10:05,848 DEBUG HandlerThread:3633 [system_info.py:__init__():26] System info init
12
+ 2024-05-23 13:10:05,848 DEBUG HandlerThread:3633 [system_info.py:__init__():41] System info init done
13
+ 2024-05-23 13:10:05,848 INFO HandlerThread:3633 [system_monitor.py:start():194] Starting system monitor
14
+ 2024-05-23 13:10:05,848 INFO SystemMonitor:3633 [system_monitor.py:_start():158] Starting system asset monitoring threads
15
+ 2024-05-23 13:10:05,848 INFO HandlerThread:3633 [system_monitor.py:probe():214] Collecting system info
16
+ 2024-05-23 13:10:05,855 INFO SystemMonitor:3633 [interfaces.py:start():188] Started cpu monitoring
17
+ 2024-05-23 13:10:05,855 INFO SystemMonitor:3633 [interfaces.py:start():188] Started disk monitoring
18
+ 2024-05-23 13:10:05,856 INFO SystemMonitor:3633 [interfaces.py:start():188] Started memory monitoring
19
+ 2024-05-23 13:10:05,856 INFO SystemMonitor:3633 [interfaces.py:start():188] Started network monitoring
20
+ 2024-05-23 13:10:05,921 DEBUG HandlerThread:3633 [system_info.py:probe():150] Probing system
21
+ 2024-05-23 13:10:05,924 DEBUG HandlerThread:3633 [system_info.py:_probe_git():135] Probing git
22
+ 2024-05-23 13:10:05,933 ERROR HandlerThread:3633 [gitlib.py:root():92] git root error: Cmd('git') failed due to: exit code(128)
23
+ cmdline: git rev-parse --show-toplevel
24
+ stderr: 'fatal: detected dubious ownership in repository at '/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness'
25
+ To add an exception for this directory, call:
26
+
27
+ git config --global --add safe.directory /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness'
28
+ 2024-05-23 13:10:05,933 DEBUG HandlerThread:3633 [system_info.py:_probe_git():143] Probing git done
29
+ 2024-05-23 13:10:05,933 DEBUG HandlerThread:3633 [system_info.py:probe():198] Probing system done
30
+ 2024-05-23 13:10:05,933 DEBUG HandlerThread:3633 [system_monitor.py:probe():223] {'os': 'Linux-5.15.0-92-generic-x86_64-with-glibc2.35', 'python': '3.10.12', 'heartbeatAt': '2024-05-23T13:10:05.921078', 'startedAt': '2024-05-23T13:10:05.381984', 'docker': None, 'cuda': None, 'args': ('--model', 'hf', '--model_args', 'pretrained=/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step28000', '--tasks', 'hellaswag,arc_easy,openbookqa,winogrande,sst2,mrpc', '--batch_size', 'auto', '--wandb_args', 'project=bharatgpt,group=trial_expt_2'), 'state': 'running', 'program': '-m lm_eval.__main__', 'codePathLocal': None, 'git': {'remote': 'https://github.com/EleutherAI/lm-evaluation-harness', 'commit': None}, 'email': None, 'root': '/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness', 'host': 'peacock-evaluation-worker-0', 'username': 'root', 'executable': '/usr/bin/python3', 'cpu_count': 80, 'cpu_count_logical': 160, 'cpu_freq': {'current': 2327.50000625, 'min': 800.0, 'max': 3400.0}, 'cpu_freq_per_core': [{'current': 3399.997, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 
2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 
2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}], 'disk': {'/': {'total': 877.6341285705566, 'used': 211.62947463989258}}, 'memory': {'total': 1007.4379539489746}}
31
+ 2024-05-23 13:10:05,934 INFO HandlerThread:3633 [system_monitor.py:probe():224] Finished collecting system info
+ 2024-05-23 13:10:05,934 INFO HandlerThread:3633 [system_monitor.py:probe():227] Publishing system info
+ 2024-05-23 13:10:05,937 INFO HandlerThread:3633 [system_monitor.py:probe():229] Finished publishing system info
+ 2024-05-23 13:10:05,942 DEBUG SenderThread:3633 [sender.py:send():378] send: files
+ 2024-05-23 13:10:05,942 INFO SenderThread:3633 [sender.py:_save_file():1389] saving file wandb-metadata.json with policy now
+ 2024-05-23 13:10:06,114 DEBUG HandlerThread:3633 [handler.py:handle_request():158] handle_request: python_packages
+ 2024-05-23 13:10:06,115 DEBUG SenderThread:3633 [sender.py:send_request():405] send_request: python_packages
+ 2024-05-23 13:10:06,115 DEBUG HandlerThread:3633 [handler.py:handle_request():158] handle_request: stop_status
+ 2024-05-23 13:10:06,118 DEBUG SenderThread:3633 [sender.py:send_request():405] send_request: stop_status
+ 2024-05-23 13:10:06,274 DEBUG SenderThread:3633 [sender.py:send():378] send: telemetry
+ 2024-05-23 13:10:06,544 INFO wandb-upload_0:3633 [upload_job.py:push():130] Uploaded file /tmp/tmpsaxvtbfqwandb/swf409i5-wandb-metadata.json
+ 2024-05-23 13:10:06,713 INFO Thread-12 :3633 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_131005-q495m75j/files/output.log
+ 2024-05-23 13:10:06,714 INFO Thread-12 :3633 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_131005-q495m75j/files/wandb-metadata.json
+ 2024-05-23 13:10:06,714 INFO Thread-12 :3633 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_131005-q495m75j/files/requirements.txt
+ 2024-05-23 13:10:08,713 INFO Thread-12 :3633 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_131005-q495m75j/files/output.log
+ 2024-05-23 13:10:11,277 DEBUG HandlerThread:3633 [handler.py:handle_request():158] handle_request: status_report
+ 2024-05-23 13:10:16,514 DEBUG HandlerThread:3633 [handler.py:handle_request():158] handle_request: status_report
+ 2024-05-23 13:10:16,720 INFO Thread-12 :3633 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_131005-q495m75j/files/output.log
+ 2024-05-23 13:10:16,957 DEBUG SenderThread:3633 [sender.py:send():378] send: exit
+ 2024-05-23 13:10:16,957 INFO SenderThread:3633 [sender.py:send_exit():585] handling exit code: 1
+ 2024-05-23 13:10:16,957 INFO SenderThread:3633 [sender.py:send_exit():587] handling runtime: 11
+ 2024-05-23 13:10:16,963 INFO SenderThread:3633 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end
+ 2024-05-23 13:10:16,963 INFO SenderThread:3633 [sender.py:send_exit():593] send defer
+ 2024-05-23 13:10:16,963 DEBUG HandlerThread:3633 [handler.py:handle_request():158] handle_request: defer
+ 2024-05-23 13:10:16,963 INFO HandlerThread:3633 [handler.py:handle_request_defer():184] handle defer: 0
+ 2024-05-23 13:10:16,963 DEBUG SenderThread:3633 [sender.py:send_request():405] send_request: defer
+ 2024-05-23 13:10:16,963 INFO SenderThread:3633 [sender.py:send_request_defer():609] handle sender defer: 0
+ 2024-05-23 13:10:16,963 INFO SenderThread:3633 [sender.py:transition_state():613] send defer: 1
+ 2024-05-23 13:10:16,963 DEBUG HandlerThread:3633 [handler.py:handle_request():158] handle_request: defer
+ 2024-05-23 13:10:16,963 INFO HandlerThread:3633 [handler.py:handle_request_defer():184] handle defer: 1
+ 2024-05-23 13:10:16,964 DEBUG SenderThread:3633 [sender.py:send_request():405] send_request: defer
+ 2024-05-23 13:10:16,964 INFO SenderThread:3633 [sender.py:send_request_defer():609] handle sender defer: 1
+ 2024-05-23 13:10:16,964 INFO SenderThread:3633 [sender.py:transition_state():613] send defer: 2
+ 2024-05-23 13:10:16,964 DEBUG HandlerThread:3633 [handler.py:handle_request():158] handle_request: defer
+ 2024-05-23 13:10:16,964 INFO HandlerThread:3633 [handler.py:handle_request_defer():184] handle defer: 2
+ 2024-05-23 13:10:16,964 INFO HandlerThread:3633 [system_monitor.py:finish():203] Stopping system monitor
+ 2024-05-23 13:10:16,964 INFO HandlerThread:3633 [interfaces.py:finish():200] Joined cpu monitor
+ 2024-05-23 13:10:16,965 INFO HandlerThread:3633 [interfaces.py:finish():200] Joined disk monitor
+ 2024-05-23 13:10:16,965 INFO HandlerThread:3633 [interfaces.py:finish():200] Joined memory monitor
+ 2024-05-23 13:10:16,965 INFO HandlerThread:3633 [interfaces.py:finish():200] Joined network monitor
+ 2024-05-23 13:10:16,964 DEBUG SystemMonitor:3633 [system_monitor.py:_start():172] Starting system metrics aggregation loop
+ 2024-05-23 13:10:16,965 DEBUG SystemMonitor:3633 [system_monitor.py:_start():179] Finished system metrics aggregation loop
+ 2024-05-23 13:10:16,965 DEBUG SystemMonitor:3633 [system_monitor.py:_start():183] Publishing last batch of metrics
+ 2024-05-23 13:10:16,968 DEBUG SenderThread:3633 [sender.py:send_request():405] send_request: defer
+ 2024-05-23 13:10:16,968 INFO SenderThread:3633 [sender.py:send_request_defer():609] handle sender defer: 2
+ 2024-05-23 13:10:16,968 INFO SenderThread:3633 [sender.py:transition_state():613] send defer: 3
+ 2024-05-23 13:10:16,968 DEBUG SenderThread:3633 [sender.py:send():378] send: stats
+ 2024-05-23 13:10:16,969 DEBUG HandlerThread:3633 [handler.py:handle_request():158] handle_request: defer
+ 2024-05-23 13:10:16,969 INFO HandlerThread:3633 [handler.py:handle_request_defer():184] handle defer: 3
+ 2024-05-23 13:10:16,969 DEBUG SenderThread:3633 [sender.py:send_request():405] send_request: defer
+ 2024-05-23 13:10:16,969 INFO SenderThread:3633 [sender.py:send_request_defer():609] handle sender defer: 3
+ 2024-05-23 13:10:16,969 INFO SenderThread:3633 [sender.py:transition_state():613] send defer: 4
+ 2024-05-23 13:10:16,969 DEBUG HandlerThread:3633 [handler.py:handle_request():158] handle_request: defer
+ 2024-05-23 13:10:16,969 INFO HandlerThread:3633 [handler.py:handle_request_defer():184] handle defer: 4
+ 2024-05-23 13:10:16,970 DEBUG SenderThread:3633 [sender.py:send_request():405] send_request: defer
+ 2024-05-23 13:10:16,970 INFO SenderThread:3633 [sender.py:send_request_defer():609] handle sender defer: 4
+ 2024-05-23 13:10:16,970 INFO SenderThread:3633 [sender.py:transition_state():613] send defer: 5
+ 2024-05-23 13:10:16,970 DEBUG HandlerThread:3633 [handler.py:handle_request():158] handle_request: defer
+ 2024-05-23 13:10:16,970 INFO HandlerThread:3633 [handler.py:handle_request_defer():184] handle defer: 5
+ 2024-05-23 13:10:16,970 DEBUG SenderThread:3633 [sender.py:send():378] send: summary
+ 2024-05-23 13:10:16,971 INFO SenderThread:3633 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end
+ 2024-05-23 13:10:16,971 DEBUG SenderThread:3633 [sender.py:send_request():405] send_request: defer
+ 2024-05-23 13:10:16,971 INFO SenderThread:3633 [sender.py:send_request_defer():609] handle sender defer: 5
+ 2024-05-23 13:10:16,971 INFO SenderThread:3633 [sender.py:transition_state():613] send defer: 6
+ 2024-05-23 13:10:16,971 DEBUG HandlerThread:3633 [handler.py:handle_request():158] handle_request: defer
+ 2024-05-23 13:10:16,971 INFO HandlerThread:3633 [handler.py:handle_request_defer():184] handle defer: 6
+ 2024-05-23 13:10:16,971 DEBUG SenderThread:3633 [sender.py:send_request():405] send_request: defer
+ 2024-05-23 13:10:16,971 INFO SenderThread:3633 [sender.py:send_request_defer():609] handle sender defer: 6
+ 2024-05-23 13:10:16,976 DEBUG HandlerThread:3633 [handler.py:handle_request():158] handle_request: status_report
+ 2024-05-23 13:10:17,061 INFO SenderThread:3633 [sender.py:transition_state():613] send defer: 7
+ 2024-05-23 13:10:17,061 DEBUG HandlerThread:3633 [handler.py:handle_request():158] handle_request: defer
+ 2024-05-23 13:10:17,061 INFO HandlerThread:3633 [handler.py:handle_request_defer():184] handle defer: 7
+ 2024-05-23 13:10:17,061 DEBUG SenderThread:3633 [sender.py:send_request():405] send_request: defer
+ 2024-05-23 13:10:17,061 INFO SenderThread:3633 [sender.py:send_request_defer():609] handle sender defer: 7
+ 2024-05-23 13:10:17,722 INFO Thread-12 :3633 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_131005-q495m75j/files/config.yaml
+ 2024-05-23 13:10:17,722 INFO Thread-12 :3633 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_131005-q495m75j/files/wandb-summary.json
+ 2024-05-23 13:10:17,957 DEBUG HandlerThread:3633 [handler.py:handle_request():158] handle_request: poll_exit
+ 2024-05-23 13:10:18,299 INFO SenderThread:3633 [sender.py:transition_state():613] send defer: 8
+ 2024-05-23 13:10:18,300 DEBUG SenderThread:3633 [sender.py:send_request():405] send_request: poll_exit
+ 2024-05-23 13:10:18,300 DEBUG HandlerThread:3633 [handler.py:handle_request():158] handle_request: defer
+ 2024-05-23 13:10:18,300 INFO HandlerThread:3633 [handler.py:handle_request_defer():184] handle defer: 8
+ 2024-05-23 13:10:18,300 DEBUG SenderThread:3633 [sender.py:send_request():405] send_request: defer
+ 2024-05-23 13:10:18,300 INFO SenderThread:3633 [sender.py:send_request_defer():609] handle sender defer: 8
+ 2024-05-23 13:10:18,300 INFO SenderThread:3633 [job_builder.py:build():432] Attempting to build job artifact
+ 2024-05-23 13:10:18,301 INFO SenderThread:3633 [job_builder.py:_get_source_type():576] no source found
+ 2024-05-23 13:10:18,301 INFO SenderThread:3633 [sender.py:transition_state():613] send defer: 9
+ 2024-05-23 13:10:18,301 DEBUG HandlerThread:3633 [handler.py:handle_request():158] handle_request: defer
+ 2024-05-23 13:10:18,301 INFO HandlerThread:3633 [handler.py:handle_request_defer():184] handle defer: 9
+ 2024-05-23 13:10:18,301 DEBUG SenderThread:3633 [sender.py:send_request():405] send_request: defer
+ 2024-05-23 13:10:18,301 INFO SenderThread:3633 [sender.py:send_request_defer():609] handle sender defer: 9
+ 2024-05-23 13:10:18,301 INFO SenderThread:3633 [dir_watcher.py:finish():358] shutting down directory watcher
+ 2024-05-23 13:10:18,723 INFO SenderThread:3633 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_131005-q495m75j/files/output.log
+ 2024-05-23 13:10:18,724 INFO SenderThread:3633 [dir_watcher.py:finish():388] scan: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_131005-q495m75j/files
+ 2024-05-23 13:10:18,724 INFO SenderThread:3633 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_131005-q495m75j/files/output.log output.log
+ 2024-05-23 13:10:18,724 INFO SenderThread:3633 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_131005-q495m75j/files/wandb-summary.json wandb-summary.json
+ 2024-05-23 13:10:18,726 INFO SenderThread:3633 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_131005-q495m75j/files/wandb-metadata.json wandb-metadata.json
+ 2024-05-23 13:10:18,728 INFO SenderThread:3633 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_131005-q495m75j/files/requirements.txt requirements.txt
+ 2024-05-23 13:10:18,728 INFO SenderThread:3633 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_131005-q495m75j/files/config.yaml config.yaml
+ 2024-05-23 13:10:18,729 INFO SenderThread:3633 [sender.py:transition_state():613] send defer: 10
+ 2024-05-23 13:10:18,730 DEBUG HandlerThread:3633 [handler.py:handle_request():158] handle_request: defer
+ 2024-05-23 13:10:18,730 INFO HandlerThread:3633 [handler.py:handle_request_defer():184] handle defer: 10
+ 2024-05-23 13:10:18,731 DEBUG SenderThread:3633 [sender.py:send_request():405] send_request: defer
+ 2024-05-23 13:10:18,731 INFO SenderThread:3633 [sender.py:send_request_defer():609] handle sender defer: 10
+ 2024-05-23 13:10:18,731 INFO SenderThread:3633 [file_pusher.py:finish():169] shutting down file pusher
+ 2024-05-23 13:10:18,957 DEBUG HandlerThread:3633 [handler.py:handle_request():158] handle_request: poll_exit
+ 2024-05-23 13:10:18,957 DEBUG SenderThread:3633 [sender.py:send_request():405] send_request: poll_exit
+ 2024-05-23 13:10:18,974 INFO wandb-upload_0:3633 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_131005-q495m75j/files/output.log
+ 2024-05-23 13:10:19,385 INFO wandb-upload_3:3633 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_131005-q495m75j/files/config.yaml
+ 2024-05-23 13:10:19,393 INFO wandb-upload_2:3633 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_131005-q495m75j/files/requirements.txt
+ 2024-05-23 13:10:19,411 INFO wandb-upload_1:3633 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_131005-q495m75j/files/wandb-summary.json
+ 2024-05-23 13:10:19,611 INFO Thread-11 (_thread_body):3633 [sender.py:transition_state():613] send defer: 11
+ 2024-05-23 13:10:19,611 DEBUG HandlerThread:3633 [handler.py:handle_request():158] handle_request: defer
+ 2024-05-23 13:10:19,611 INFO HandlerThread:3633 [handler.py:handle_request_defer():184] handle defer: 11
+ 2024-05-23 13:10:19,611 DEBUG SenderThread:3633 [sender.py:send_request():405] send_request: defer
+ 2024-05-23 13:10:19,611 INFO SenderThread:3633 [sender.py:send_request_defer():609] handle sender defer: 11
+ 2024-05-23 13:10:19,611 INFO SenderThread:3633 [file_pusher.py:join():175] waiting for file pusher
+ 2024-05-23 13:10:19,611 INFO SenderThread:3633 [sender.py:transition_state():613] send defer: 12
+ 2024-05-23 13:10:19,612 DEBUG HandlerThread:3633 [handler.py:handle_request():158] handle_request: defer
+ 2024-05-23 13:10:19,612 INFO HandlerThread:3633 [handler.py:handle_request_defer():184] handle defer: 12
+ 2024-05-23 13:10:19,612 DEBUG SenderThread:3633 [sender.py:send_request():405] send_request: defer
+ 2024-05-23 13:10:19,612 INFO SenderThread:3633 [sender.py:send_request_defer():609] handle sender defer: 12
+ 2024-05-23 13:10:19,612 INFO SenderThread:3633 [file_stream.py:finish():601] file stream finish called
+ 2024-05-23 13:10:19,688 INFO SenderThread:3633 [file_stream.py:finish():605] file stream finish is done
+ 2024-05-23 13:10:19,688 INFO SenderThread:3633 [sender.py:transition_state():613] send defer: 13
+ 2024-05-23 13:10:19,688 DEBUG HandlerThread:3633 [handler.py:handle_request():158] handle_request: defer
+ 2024-05-23 13:10:19,688 INFO HandlerThread:3633 [handler.py:handle_request_defer():184] handle defer: 13
+ 2024-05-23 13:10:19,689 DEBUG SenderThread:3633 [sender.py:send_request():405] send_request: defer
+ 2024-05-23 13:10:19,689 INFO SenderThread:3633 [sender.py:send_request_defer():609] handle sender defer: 13
+ 2024-05-23 13:10:19,689 INFO SenderThread:3633 [sender.py:transition_state():613] send defer: 14
+ 2024-05-23 13:10:19,689 DEBUG HandlerThread:3633 [handler.py:handle_request():158] handle_request: defer
+ 2024-05-23 13:10:19,689 INFO HandlerThread:3633 [handler.py:handle_request_defer():184] handle defer: 14
+ 2024-05-23 13:10:19,689 DEBUG SenderThread:3633 [sender.py:send():378] send: final
+ 2024-05-23 13:10:19,689 DEBUG SenderThread:3633 [sender.py:send():378] send: footer
+ 2024-05-23 13:10:19,689 DEBUG SenderThread:3633 [sender.py:send_request():405] send_request: defer
+ 2024-05-23 13:10:19,689 INFO SenderThread:3633 [sender.py:send_request_defer():609] handle sender defer: 14
+ 2024-05-23 13:10:19,689 DEBUG HandlerThread:3633 [handler.py:handle_request():158] handle_request: poll_exit
+ 2024-05-23 13:10:19,690 DEBUG SenderThread:3633 [sender.py:send_request():405] send_request: poll_exit
+ 2024-05-23 13:10:19,690 DEBUG HandlerThread:3633 [handler.py:handle_request():158] handle_request: poll_exit
+ 2024-05-23 13:10:19,690 DEBUG HandlerThread:3633 [handler.py:handle_request():158] handle_request: server_info
+ 2024-05-23 13:10:19,690 DEBUG HandlerThread:3633 [handler.py:handle_request():158] handle_request: get_summary
+ 2024-05-23 13:10:19,690 DEBUG HandlerThread:3633 [handler.py:handle_request():158] handle_request: sampled_history
+ 2024-05-23 13:10:19,690 DEBUG HandlerThread:3633 [handler.py:handle_request():158] handle_request: internal_messages
+ 2024-05-23 13:10:19,691 DEBUG SenderThread:3633 [sender.py:send_request():405] send_request: poll_exit
+ 2024-05-23 13:10:19,691 DEBUG SenderThread:3633 [sender.py:send_request():405] send_request: server_info
+ 2024-05-23 13:10:19,752 INFO MainThread:3633 [wandb_run.py:_footer_history_summary_info():3994] rendering history
+ 2024-05-23 13:10:19,752 INFO MainThread:3633 [wandb_run.py:_footer_history_summary_info():4026] rendering summary
+ 2024-05-23 13:10:19,752 INFO MainThread:3633 [wandb_run.py:_footer_sync_info():3953] logging synced files
+ 2024-05-23 13:10:19,753 DEBUG HandlerThread:3633 [handler.py:handle_request():158] handle_request: shutdown
+ 2024-05-23 13:10:19,753 INFO HandlerThread:3633 [handler.py:finish():882] shutting down handler
+ 2024-05-23 13:10:20,691 INFO WriterThread:3633 [datastore.py:close():296] close: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_131005-q495m75j/run-q495m75j.wandb
+ 2024-05-23 13:10:20,752 INFO SenderThread:3633 [sender.py:finish():1545] shutting down sender
+ 2024-05-23 13:10:20,752 INFO SenderThread:3633 [file_pusher.py:finish():169] shutting down file pusher
+ 2024-05-23 13:10:20,752 INFO SenderThread:3633 [file_pusher.py:join():175] waiting for file pusher
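The internal log above traces wandb's staged shutdown: after `send: exit`, the SenderThread walks a chain of numbered defer states (0 through 14), stopping the system monitor, flushing the summary, the directory watcher, the file pusher, and the file stream in a fixed order, with each stage acknowledged before the next begins. A toy sketch of that handshake follows; the stage names are inferred from the log lines, not taken from wandb's source:

```python
# A minimal sketch of the staged-shutdown pattern visible in the log above.
# Stage names are guesses reconstructed from the log; this is NOT wandb's code.
SHUTDOWN_STAGES = [
    "flush_records",        # defer 0-1: stop accepting new records
    "stop_system_monitor",  # defer 2: join cpu/disk/memory/network monitors
    "send_final_stats",     # defer 3: last batch of system metrics
    "send_summary",         # defer 5: save wandb-summary.json with policy end
    "build_job_artifact",   # defer 8
    "stop_dir_watcher",     # defer 9: final scan of the files/ directory
    "flush_file_pusher",    # defer 10-11: upload output.log, config.yaml, ...
    "finish_file_stream",   # defer 12-13
    "send_footer",          # defer 14: final/footer records
]

def shutdown(stages=SHUTDOWN_STAGES):
    # Each stage is acknowledged before the next one starts, which is why every
    # "send defer: N" in the log is paired with a "handle sender defer: N".
    for n, stage in enumerate(stages):
        print(f"send defer: {n} ({stage})")
        print(f"handle sender defer: {n}")

if __name__ == "__main__":
    shutdown()
```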
lm-evaluation-harness/wandb/run-20240523_131005-q495m75j/logs/debug.log ADDED
@@ -0,0 +1,29 @@
+ 2024-05-23 13:10:05,395 INFO MainThread:3478 [wandb_setup.py:_flush():76] Current SDK version is 0.17.0
+ 2024-05-23 13:10:05,396 INFO MainThread:3478 [wandb_setup.py:_flush():76] Configure stats pid to 3478
+ 2024-05-23 13:10:05,396 INFO MainThread:3478 [wandb_setup.py:_flush():76] Loading settings from /root/.config/wandb/settings
+ 2024-05-23 13:10:05,396 INFO MainThread:3478 [wandb_setup.py:_flush():76] Loading settings from /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/settings
+ 2024-05-23 13:10:05,396 INFO MainThread:3478 [wandb_setup.py:_flush():76] Loading settings from environment variables: {}
+ 2024-05-23 13:10:05,396 INFO MainThread:3478 [wandb_setup.py:_flush():76] Applying setup settings: {'_disable_service': False}
+ 2024-05-23 13:10:05,396 WARNING MainThread:3478 [wandb_setup.py:_flush():76] Could not find program at -m lm_eval.__main__
+ 2024-05-23 13:10:05,396 INFO MainThread:3478 [wandb_setup.py:_flush():76] Inferring run settings from compute environment: {'program_relpath': None, 'program': '-m lm_eval.__main__'}
+ 2024-05-23 13:10:05,396 INFO MainThread:3478 [wandb_setup.py:_flush():76] Applying login settings: {}
+ 2024-05-23 13:10:05,396 INFO MainThread:3478 [wandb_init.py:_log_setup():520] Logging user logs to /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_131005-q495m75j/logs/debug.log
+ 2024-05-23 13:10:05,396 INFO MainThread:3478 [wandb_init.py:_log_setup():521] Logging internal logs to /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_131005-q495m75j/logs/debug-internal.log
+ 2024-05-23 13:10:05,396 INFO MainThread:3478 [wandb_init.py:init():560] calling init triggers
+ 2024-05-23 13:10:05,396 INFO MainThread:3478 [wandb_init.py:init():567] wandb.init called with sweep_config: {}
+ config: {}
+ 2024-05-23 13:10:05,396 INFO MainThread:3478 [wandb_init.py:init():610] starting backend
+ 2024-05-23 13:10:05,396 INFO MainThread:3478 [wandb_init.py:init():614] setting up manager
+ 2024-05-23 13:10:05,399 INFO MainThread:3478 [backend.py:_multiprocessing_setup():105] multiprocessing start_methods=fork,spawn,forkserver, using: spawn
+ 2024-05-23 13:10:05,400 INFO MainThread:3478 [wandb_init.py:init():622] backend started and connected
+ 2024-05-23 13:10:05,404 INFO MainThread:3478 [wandb_init.py:init():711] updated telemetry
+ 2024-05-23 13:10:05,412 INFO MainThread:3478 [wandb_init.py:init():744] communicating run to backend with 90.0 second timeout
+ 2024-05-23 13:10:05,719 INFO MainThread:3478 [wandb_run.py:_on_init():2396] communicating current version
+ 2024-05-23 13:10:05,840 INFO MainThread:3478 [wandb_run.py:_on_init():2405] got version response
+ 2024-05-23 13:10:05,840 INFO MainThread:3478 [wandb_init.py:init():795] starting run threads in backend
+ 2024-05-23 13:10:06,116 INFO MainThread:3478 [wandb_run.py:_console_start():2374] atexit reg
+ 2024-05-23 13:10:06,116 INFO MainThread:3478 [wandb_run.py:_redirect():2229] redirect: wrap_raw
+ 2024-05-23 13:10:06,116 INFO MainThread:3478 [wandb_run.py:_redirect():2294] Wrapping output streams.
+ 2024-05-23 13:10:06,116 INFO MainThread:3478 [wandb_run.py:_redirect():2319] Redirects installed.
+ 2024-05-23 13:10:06,119 INFO MainThread:3478 [wandb_init.py:init():838] run started, returning control to user process
+ 2024-05-23 13:10:20,754 WARNING MsgRouterThr:3478 [router.py:message_loop():77] message_loop has been closed
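This debug.log is the user-process side of `wandb.init()`: settings are loaded, a backend service process is spawned (`using: spawn`), the run is registered with a 90-second timeout, and stdout/stderr are wrapped before control returns. The same lifecycle driven from user code, as a minimal sketch (project name is hypothetical; wandb 0.17.x API):

```python
# The user-process side of the lifecycle traced in debug.log above.
import wandb

run = wandb.init(project="lm-eval-harness-integration")  # spawns the backend, wraps stdio
try:
    run.log({"placeholder_metric": 1.0})  # anything printed also lands in files/output.log
finally:
    run.finish()  # triggers the deferred shutdown sequence shown in debug-internal.log
```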
lm-evaluation-harness/wandb/run-20240523_131005-q495m75j/run-q495m75j.wandb ADDED
Binary file (11.2 kB). View file
 
lm-evaluation-harness/wandb/run-20240530_070447-fi4sos5j/files/requirements.txt ADDED
@@ -0,0 +1,154 @@
+ DataProperty==1.0.1
+ GitPython==3.1.43
+ Jinja2==3.1.4
+ Markdown==3.6
+ MarkupSafe==2.1.5
+ Pillow-SIMD==7.0.0.post3
+ PyYAML==6.0
+ Werkzeug==3.0.3
+ absl-py==2.1.0
+ accelerate==0.30.1
+ aiohttp==3.9.5
+ aiosignal==1.3.1
+ async-timeout==4.0.3
+ attrs==23.2.0
+ av==9.2.0
+ cachetools==5.3.3
+ certifi==2024.2.2
+ cffi==1.15.1
+ cfgv==3.4.0
+ chardet==5.2.0
+ charset-normalizer==3.3.2
+ click==8.1.7
+ cmake==3.29.2
+ colorama==0.4.6
+ datasets==2.19.1
+ deepspeed==0.12.4+hpu.synapse.v1.15.1
+ dill==0.3.8
+ distlib==0.3.8
+ docker-pycreds==0.4.0
+ einops==0.8.0
+ evaluate==0.4.2
+ exceptiongroup==1.2.1
+ expecttest==0.2.1
+ filelock==3.14.0
+ frozenlist==1.4.1
+ fsspec==2024.3.1
+ gitdb==4.0.11
+ google-auth-oauthlib==0.4.6
+ google-auth==2.29.0
+ grpcio==1.63.0
+ habana-media-loader==1.15.1.15
+ habana-pyhlml==1.15.1.15
+ habana-torch-dataloader==1.15.1.15
+ habana-torch-plugin==1.15.1.15
+ habana_gpu_migration==1.15.1.15
+ habana_quantization_toolkit==1.15.1.15
+ hjson==3.1.0
+ huggingface-hub==0.23.2
+ identify==2.5.36
+ idna==3.7
+ iniconfig==2.0.0
+ joblib==1.4.2
+ jsonlines==4.0.0
+ lightning-habana==1.4.0
+ lightning-utilities==0.11.2
+ lightning==2.2.0.post0
+ lm_eval==0.4.2
+ lm_eval==0.4.2
+ lm_eval==0.4.2
+ lxml==5.2.2
+ mbstrdecoder==1.1.3
+ more-itertools==10.2.0
+ mpi4py==3.1.4
+ mpmath==1.3.0
+ multidict==6.0.5
+ multiprocess==0.70.16
+ networkx==3.3
+ ninja==1.11.1.1
+ nltk==3.8.1
+ nodeenv==1.8.0
+ numexpr==2.10.0
+ numpy==1.23.5
+ oauthlib==3.2.2
+ packaging==24.0
+ pandas==2.0.1
+ pathspec==0.12.1
+ pathvalidate==3.2.0
+ peft==0.11.1
+ perfetto==0.7.0
+ pip==22.0.2
+ pip==23.3.1
+ platformdirs==4.2.1
+ pluggy==1.5.0
+ portalocker==2.8.2
+ pre-commit==3.3.3
+ pretty-errors==1.2.25
+ protobuf==3.20.3
+ psutil==5.9.8
+ py-cpuinfo==9.0.0
+ pyarrow-hotfix==0.6
+ pyarrow==16.1.0
+ pyasn1==0.6.0
+ pyasn1_modules==0.4.0
+ pybind11==2.10.4
+ pycparser==2.22
+ pydantic==1.10.13
+ pynvml==8.0.4
+ pytablewriter==1.2.0
+ pytest==8.2.0
+ python-dateutil==2.9.0.post0
+ pytorch-lightning==2.2.4
+ pytz==2024.1
+ regex==2023.5.5
+ requests-oauthlib==2.0.0
+ requests==2.31.0
+ rouge_score==0.1.2
+ rsa==4.9
+ sacrebleu==2.4.2
+ safetensors==0.4.3
+ scikit-learn==1.5.0
+ scipy==1.13.1
+ sentencepiece==0.2.0
+ sentry-sdk==2.3.1
+ setproctitle==1.3.3
+ setuptools==59.6.0
+ setuptools==69.5.1
+ six==1.16.0
+ smmap==5.0.1
+ sqlitedict==2.1.0
+ symengine==0.11.0
+ sympy==1.12
+ tabledata==1.3.3
+ tabulate==0.9.0
+ tcolorpy==0.1.6
+ tdqm==0.0.1
+ tensorboard-data-server==0.6.1
+ tensorboard-plugin-wit==1.8.1
+ tensorboard==2.11.2
+ threadpoolctl==3.5.0
+ tokenizers==0.15.2
+ tomli==2.0.1
+ torch==2.2.0a0+git8964477
+ torch_tb_profiler==0.4.0
+ torchaudio==2.2.0+08901ad
+ torchdata==0.7.1+5e6f7b7
+ torchmetrics==1.4.0
+ torchtext==0.17.0+400da5c
+ torchvision==0.17.0+b2383d4
+ tqdm-multiprocess==0.0.11
+ tqdm==4.66.4
+ transformers==4.36.2
+ typepy==1.3.2
+ typing_extensions==4.11.0
+ tzdata==2024.1
+ urllib3==1.26.18
+ virtualenv==20.26.1
+ wandb==0.17.0
+ wheel==0.37.1
+ wheel==0.43.0
+ word2number==1.1
+ xxhash==3.4.1
+ yamllint==1.35.1
+ yarl==1.9.4
+ zstandard==0.22.0
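Note that this freeze-style list pins several distributions more than once (`lm_eval` three times; `pip`, `setuptools`, and `wheel` twice each), which typically happens when overlapping site-packages directories are visible on `sys.path`. A small helper to surface such duplicates; the path argument below is hypothetical:

```python
# Flag packages that appear more than once in a freeze-style requirements file,
# as lm_eval, pip, setuptools, and wheel do in the list above.
from pathlib import Path

def duplicate_pins(path: str) -> dict[str, list[str]]:
    pins: dict[str, list[str]] = {}
    for line in Path(path).read_text().splitlines():
        line = line.strip()
        if not line or line.startswith("#"):
            continue
        name = line.split("==")[0].lower()  # crude: assumes "name==version" pins
        pins.setdefault(name, []).append(line)
    return {name: lines for name, lines in pins.items() if len(lines) > 1}

# Example (path is hypothetical):
# duplicate_pins("files/requirements.txt")
# -> {'lm_eval': ['lm_eval==0.4.2', 'lm_eval==0.4.2', 'lm_eval==0.4.2'], ...}
```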
lm-evaluation-harness/wandb/run-20240530_070447-fi4sos5j/files/wandb-summary.json ADDED
@@ -0,0 +1 @@
+ {"_wandb": {"runtime": 39}}
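The summary for this run holds only the bookkeeping key `_wandb.runtime` (39 seconds), i.e. no metrics were logged before the process exited. Reading it back is a one-liner; the relative path below is an assumption:

```python
# Reading the run duration back out of a wandb-summary.json like the one above.
import json

with open("files/wandb-summary.json") as f:  # path relative to the run dir (assumed)
    summary = json.load(f)

print(summary["_wandb"]["runtime"])  # -> 39 (seconds)
```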
venv/lib/python3.10/site-packages/transformers/models/bertweet/__init__.py ADDED
@@ -0,0 +1,29 @@
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ from typing import TYPE_CHECKING
+
+ from ...utils import _LazyModule
+
+
+ _import_structure = {"tokenization_bertweet": ["BertweetTokenizer"]}
+
+
+ if TYPE_CHECKING:
+     from .tokenization_bertweet import BertweetTokenizer
+
+ else:
+     import sys
+
+     sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
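This `__init__.py` installs a `_LazyModule` into `sys.modules`, so `tokenization_bertweet` (and its `regex`/`emoji` dependencies) are imported only on first attribute access. A stripped-down sketch of that idea follows; it is not transformers' actual `_LazyModule`, which additionally handles type checking and import errors:

```python
# A minimal sketch of the lazy-module pattern used above (illustrative only).
import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # attribute name -> submodule that defines it
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        mod_name = self._attr_to_module.get(attr)
        if mod_name is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        module = importlib.import_module(f"{self.__name__}.{mod_name}")
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so the submodule is imported only once
        return value

# Nothing under "pkg" is imported until pkg.BertweetTokenizer-style access happens:
# sys.modules["pkg"] = LazyModule("pkg", {"tokenization_bertweet": ["BertweetTokenizer"]})
```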
venv/lib/python3.10/site-packages/transformers/models/bertweet/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (513 Bytes). View file
 
venv/lib/python3.10/site-packages/transformers/models/bertweet/__pycache__/tokenization_bertweet.cpython-310.pyc ADDED
Binary file (20.8 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/bertweet/tokenization_bertweet.py ADDED
@@ -0,0 +1,767 @@
+ # coding=utf-8
+ # Copyright (c) 2020, VinAI Research and the HuggingFace Inc. team.
+ # Copyright 2018 The Open AI Team Authors and The HuggingFace Inc. team.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """ Tokenization classes for BERTweet"""
+
+
+ import html
+ import os
+ import re
+ from shutil import copyfile
+ from typing import List, Optional, Tuple
+
+ import regex
+
+ from ...tokenization_utils import PreTrainedTokenizer
+ from ...utils import logging
+
+
+ logger = logging.get_logger(__name__)
+
+ VOCAB_FILES_NAMES = {
+     "vocab_file": "vocab.txt",
+     "merges_file": "bpe.codes",
+ }
+
+
+ def get_pairs(word):
+     """
+     Return set of symbol pairs in a word.
+
+     Word is represented as tuple of symbols (symbols being variable-length strings).
+     """
+     pairs = set()
+     prev_char = word[0]
+     for char in word[1:]:
+         pairs.add((prev_char, char))
+         prev_char = char
+
+     pairs = set(pairs)
+     return pairs
+
+
+ class BertweetTokenizer(PreTrainedTokenizer):
+     """
+     Constructs a BERTweet tokenizer, using Byte-Pair-Encoding.
+
+     This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
+     this superclass for more information regarding those methods.
+
+     Args:
+         vocab_file (`str`):
+             Path to the vocabulary file.
+         merges_file (`str`):
+             Path to the merges file.
+         normalization (`bool`, *optional*, defaults to `False`):
+             Whether or not to apply a normalization preprocess.
+         bos_token (`str`, *optional*, defaults to `"<s>"`):
+             The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
+
+             <Tip>
+
+             When building a sequence using special tokens, this is not the token that is used for the beginning of
+             sequence. The token used is the `cls_token`.
+
+             </Tip>
+
+         eos_token (`str`, *optional*, defaults to `"</s>"`):
+             The end of sequence token.
+
+             <Tip>
+
+             When building a sequence using special tokens, this is not the token that is used for the end of sequence.
+             The token used is the `sep_token`.
+
+             </Tip>
+
+         sep_token (`str`, *optional*, defaults to `"</s>"`):
+             The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
+             sequence classification or for a text and a question for question answering. It is also used as the last
+             token of a sequence built with special tokens.
+         cls_token (`str`, *optional*, defaults to `"<s>"`):
+             The classifier token which is used when doing sequence classification (classification of the whole sequence
+             instead of per-token classification). It is the first token of the sequence when built with special tokens.
+         unk_token (`str`, *optional*, defaults to `"<unk>"`):
+             The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
+             token instead.
+         pad_token (`str`, *optional*, defaults to `"<pad>"`):
+             The token used for padding, for example when batching sequences of different lengths.
+         mask_token (`str`, *optional*, defaults to `"<mask>"`):
+             The token used for masking values. This is the token used when training this model with masked language
+             modeling. This is the token which the model will try to predict.
+     """
+
+     vocab_files_names = VOCAB_FILES_NAMES
+
+     def __init__(
+         self,
+         vocab_file,
+         merges_file,
+         normalization=False,
+         bos_token="<s>",
+         eos_token="</s>",
+         sep_token="</s>",
+         cls_token="<s>",
+         unk_token="<unk>",
+         pad_token="<pad>",
+         mask_token="<mask>",
+         **kwargs,
+     ):
+         try:
+             from emoji import demojize
+
+             self.demojizer = demojize
+         except ImportError:
+             logger.warning(
+                 "emoji is not installed, thus not converting emoticons or emojis into text. Install emoji: pip3"
+                 " install emoji==0.6.0"
+             )
+             self.demojizer = None
+
+         self.vocab_file = vocab_file
+         self.merges_file = merges_file
+
+         self.encoder = {}
+         self.encoder[str(bos_token)] = 0
+         self.encoder[str(pad_token)] = 1
+         self.encoder[str(eos_token)] = 2
+         self.encoder[str(unk_token)] = 3
+
+         self.add_from_file(vocab_file)
+
+         self.decoder = {v: k for k, v in self.encoder.items()}
+
+         with open(merges_file, encoding="utf-8") as merges_handle:
+             merges = merges_handle.read().split("\n")[:-1]
+         merges = [tuple(merge.split()[:-1]) for merge in merges]
+         self.bpe_ranks = dict(zip(merges, range(len(merges))))
+         self.cache = {}
+
+         self.normalization = normalization
+         self.tweetPreprocessor = TweetTokenizer()
+         self.special_puncts = {"’": "'", "…": "..."}
+
+         super().__init__(
+             normalization=normalization,
+             bos_token=bos_token,
+             eos_token=eos_token,
+             sep_token=sep_token,
+             cls_token=cls_token,
+             unk_token=unk_token,
+             pad_token=pad_token,
+             mask_token=mask_token,
+             **kwargs,
+         )
+
+     def build_inputs_with_special_tokens(
+         self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+     ) -> List[int]:
+         """
+         Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
+         adding special tokens. A BERTweet sequence has the following format:
+
+         - single sequence: `<s> X </s>`
+         - pair of sequences: `<s> A </s></s> B </s>`
+
+         Args:
+             token_ids_0 (`List[int]`):
+                 List of IDs to which the special tokens will be added.
+             token_ids_1 (`List[int]`, *optional*):
+                 Optional second list of IDs for sequence pairs.
+
+         Returns:
+             `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
+         """
+
+         if token_ids_1 is None:
+             return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
+         cls = [self.cls_token_id]
+         sep = [self.sep_token_id]
+         return cls + token_ids_0 + sep + sep + token_ids_1 + sep
+
+     def get_special_tokens_mask(
+         self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
+     ) -> List[int]:
+         """
+         Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
+         special tokens using the tokenizer `prepare_for_model` method.
+
+         Args:
+             token_ids_0 (`List[int]`):
+                 List of IDs.
+             token_ids_1 (`List[int]`, *optional*):
+                 Optional second list of IDs for sequence pairs.
+             already_has_special_tokens (`bool`, *optional*, defaults to `False`):
+                 Whether or not the token list is already formatted with special tokens for the model.
+
+         Returns:
+             `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
+         """
+
+         if already_has_special_tokens:
+             return super().get_special_tokens_mask(
+                 token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
+             )
+
+         if token_ids_1 is None:
+             return [1] + ([0] * len(token_ids_0)) + [1]
+         return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
+
+     def create_token_type_ids_from_sequences(
+         self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+     ) -> List[int]:
+         """
+         Create a mask from the two sequences passed to be used in a sequence-pair classification task. BERTweet does
+         not make use of token type ids, therefore a list of zeros is returned.
+
+         Args:
+             token_ids_0 (`List[int]`):
+                 List of IDs.
+             token_ids_1 (`List[int]`, *optional*):
+                 Optional second list of IDs for sequence pairs.
+
+         Returns:
+             `List[int]`: List of zeros.
+         """
+
+         sep = [self.sep_token_id]
+         cls = [self.cls_token_id]
+
+         if token_ids_1 is None:
+             return len(cls + token_ids_0 + sep) * [0]
+         return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
+
+     @property
+     def vocab_size(self):
+         return len(self.encoder)
+
+     def get_vocab(self):
+         return dict(self.encoder, **self.added_tokens_encoder)
+
+     def bpe(self, token):
+         if token in self.cache:
+             return self.cache[token]
+         word = tuple(token)
+         word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
+         pairs = get_pairs(word)
+
+         if not pairs:
+             return token
+
+         while True:
+             bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
+             if bigram not in self.bpe_ranks:
+                 break
+             first, second = bigram
+             new_word = []
+             i = 0
+             while i < len(word):
+                 try:
+                     j = word.index(first, i)
+                 except ValueError:
+                     new_word.extend(word[i:])
+                     break
+                 else:
+                     new_word.extend(word[i:j])
+                     i = j
+
+                 if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
+                     new_word.append(first + second)
+                     i += 2
+                 else:
+                     new_word.append(word[i])
+                     i += 1
+             new_word = tuple(new_word)
+             word = new_word
+             if len(word) == 1:
+                 break
+             else:
+                 pairs = get_pairs(word)
+         word = "@@ ".join(word)
+         word = word[:-4]
+         self.cache[token] = word
+         return word
+
+     def _tokenize(self, text):
+         """Tokenize a string."""
+         if self.normalization:  # Perform Tweet normalization before performing BPE
+             text = self.normalizeTweet(text)
+
+         split_tokens = []
+         words = re.findall(r"\S+\n?", text)
+         for token in words:
+             split_tokens.extend(list(self.bpe(token).split(" ")))
+         return split_tokens
+
+     def normalizeTweet(self, tweet):
+         """
+         Normalize a raw Tweet
+         """
+         for punct in self.special_puncts:
+             tweet = tweet.replace(punct, self.special_puncts[punct])
+
+         tokens = self.tweetPreprocessor.tokenize(tweet)
+         normTweet = " ".join([self.normalizeToken(token) for token in tokens])
+
+         normTweet = (
+             normTweet.replace("cannot ", "can not ")
+             .replace("n't ", " n't ")
+             .replace("n 't ", " n't ")
+             .replace("ca n't", "can't")
+             .replace("ai n't", "ain't")
+         )
+         normTweet = (
+             normTweet.replace("'m ", " 'm ")
+             .replace("'re ", " 're ")
+             .replace("'s ", " 's ")
+             .replace("'ll ", " 'll ")
+             .replace("'d ", " 'd ")
+             .replace("'ve ", " 've ")
+         )
+         normTweet = (
+             normTweet.replace(" p . m .", " p.m.")
+             .replace(" p . m ", " p.m ")
+             .replace(" a . m .", " a.m.")
+             .replace(" a . m ", " a.m ")
+         )
+
+         return " ".join(normTweet.split())
+
+     def normalizeToken(self, token):
+         """
+         Normalize tokens in a Tweet
+         """
+         lowercased_token = token.lower()
+         if token.startswith("@"):
+             return "@USER"
+         elif lowercased_token.startswith("http") or lowercased_token.startswith("www"):
+             return "HTTPURL"
+         elif len(token) == 1:
+             if token in self.special_puncts:
+                 return self.special_puncts[token]
+             if self.demojizer is not None:
+                 return self.demojizer(token)
+             else:
+                 return token
+         else:
+             return token
+
+     def _convert_token_to_id(self, token):
+         """Converts a token (str) into an id using the vocab."""
+         return self.encoder.get(token, self.encoder.get(self.unk_token))
+
+     def _convert_id_to_token(self, index):
+         """Converts an index (integer) into a token (str) using the vocab."""
+         return self.decoder.get(index, self.unk_token)
+
+     def convert_tokens_to_string(self, tokens):
+         """Converts a sequence of tokens (strings) into a single string."""
+         out_string = " ".join(tokens).replace("@@ ", "").strip()
+         return out_string
+
+     def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
+         if not os.path.isdir(save_directory):
+             logger.error(f"Vocabulary path ({save_directory}) should be a directory")
+             return
+         out_vocab_file = os.path.join(
+             save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
+         )
+         out_merge_file = os.path.join(
+             save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
+         )
+
+         if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
+             copyfile(self.vocab_file, out_vocab_file)
+         elif not os.path.isfile(self.vocab_file):
+             with open(out_vocab_file, "wb") as fi:
+                 content_spiece_model = self.sp_model.serialized_model_proto()
+                 fi.write(content_spiece_model)
+
+         if os.path.abspath(self.merges_file) != os.path.abspath(out_merge_file):
+             copyfile(self.merges_file, out_merge_file)
+
+         return out_vocab_file, out_merge_file
+
+     # def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
+     #     filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
+     #     tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
+     #     tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
+     #     return ''.join(tokens_generated_so_far)
+
+     def add_from_file(self, f):
+         """
+         Loads a pre-existing dictionary from a text file and adds its symbols to this instance.
+         """
+         if isinstance(f, str):
+             try:
+                 with open(f, "r", encoding="utf-8") as fd:
+                     self.add_from_file(fd)
+             except FileNotFoundError as fnfe:
+                 raise fnfe
+             except UnicodeError:
+                 raise Exception(f"Incorrect encoding detected in {f}, please rebuild the dataset")
+             return
+
+         lines = f.readlines()
+         for lineTmp in lines:
+             line = lineTmp.strip()
+             idx = line.rfind(" ")
+             if idx == -1:
+                 raise ValueError("Incorrect dictionary format, expected '<token> <cnt>'")
+             word = line[:idx]
+             self.encoder[word] = len(self.encoder)
+
+
+ # Natural Language Toolkit: Twitter Tokenizer
+ #
+ # Copyright (C) 2001-2020 NLTK Project
+ # Author: Christopher Potts <[email protected]>
+ #         Ewan Klein <[email protected]> (modifications)
+ #         Pierpaolo Pantone <> (modifications)
+ # URL: http://nltk.org/
+ # For license information, see LICENSE.TXT
+ #
+
+
+ """
+ Twitter-aware tokenizer, designed to be flexible and easy to adapt to new domains and tasks. The basic logic is this:
+
+ 1. The tuple regex_strings defines a list of regular expression strings.
+
+ 2. The regex_strings strings are put, in order, into a compiled regular expression object called word_re.
+
+ 3. The tokenization is done by word_re.findall(s), where s is the user-supplied string, inside the tokenize() method of
+    the class Tokenizer.
+
+ 4. When instantiating Tokenizer objects, there is a single option: preserve_case. By default, it is set to True. If it
+    is set to False, then the tokenizer will lowercase everything except for emoticons.
+
+ """
+
+
+ ######################################################################
+ #
+ # import regex  # https://github.com/nltk/nltk/issues/2409
+ # import html
+ #
+ ######################################################################
+ # The following strings are components in the regular expression
+ # that is used for tokenizing. It's important that phone_number
+ # appears first in the final regex (since it can contain whitespace).
+ # It also could matter that tags comes after emoticons, due to the
+ # possibility of having text like
+ #
+ #     <:| and some text >:)
+ #
+ # Most importantly, the final element should always be last, since it
+ # does a last ditch whitespace-based tokenization of whatever is left.
+
+ # ToDo: Update with http://en.wikipedia.org/wiki/List_of_emoticons ?
+
+ # This particular element is used in a couple ways, so we define it
+ # with a name:
+ # docstyle-ignore
+ EMOTICONS = r"""
+     (?:
+       [<>]?
+       [:;=8]                     # eyes
+       [\-o\*\']?                 # optional nose
+       [\)\]\(\[dDpP/\:\}\{@\|\\] # mouth
+       |
+       [\)\]\(\[dDpP/\:\}\{@\|\\] # mouth
+       [\-o\*\']?                 # optional nose
+       [:;=8]                     # eyes
+       [<>]?
+       |
+       <3                         # heart
+     )"""
+
+ # URL pattern due to John Gruber, modified by Tom Winzig. See
+ # https://gist.github.com/winzig/8894715
+ # docstyle-ignore
+ URLS = r"""  # Capture 1: entire matched URL
+   (?:
+   https?:                  # URL protocol and colon
+     (?:
+       /{1,3}               # 1-3 slashes
+       |                    # or
+       [a-z0-9%]            # Single letter or digit or '%'
+                            # (Trying not to match e.g. "URI::Escape")
+     )
+     |                      # or
+                            # looks like domain name followed by a slash:
+     [a-z0-9.\-]+[.]
+     (?:[a-z]{2,13})
+     /
+   )
+   (?:                      # One or more:
+     [^\s()<>{}\[\]]+       # Run of non-space, non-()<>{}[]
+     |                      # or
+     \([^\s()]*?\([^\s()]+\)[^\s()]*?\)  # balanced parens, one level deep: (...(...)...)
+     |
+     \([^\s]+?\)            # balanced parens, non-recursive: (...)
+   )+
+   (?:                      # End with:
+     \([^\s()]*?\([^\s()]+\)[^\s()]*?\)  # balanced parens, one level deep: (...(...)...)
+     |
+     \([^\s]+?\)            # balanced parens, non-recursive: (...)
+     |                      # or
+     [^\s`!()\[\]{};:'".,<>?«»“”‘’]  # not a space or one of these punct chars
+   )
+   |                        # OR, the following to match naked domains:
+   (?:
+     (?<!@)                 # not preceded by a @, avoid matching foo@_gmail.com_
+     [a-z0-9]+
+     (?:[.\-][a-z0-9]+)*
+     [.]
+     (?:[a-z]{2,13})
+     \b
+     /?
+     (?!@)                  # not succeeded by a @,
+                            # avoid matching "foo.na" in "[email protected]"
+   )
+ """
+
+ # docstyle-ignore
+ # The components of the tokenizer:
+ REGEXPS = (
+     URLS,
+     # Phone numbers:
+     r"""
+     (?:
+       (?:            # (international)
+         \+?[01]
+         [ *\-.\)]*
+       )?
+       (?:            # (area code)
+         [\(]?
+         \d{3}
+         [ *\-.\)]*
+       )?
+       \d{3}          # exchange
+       [ *\-.\)]*
+       \d{4}          # base
+     )""",
+     # ASCII Emoticons
+     EMOTICONS,
+     # HTML tags:
+     r"""<[^>\s]+>""",
+     # ASCII Arrows
+     r"""[\-]+>|<[\-]+""",
+     # Twitter username:
+     r"""(?:@[\w_]+)""",
+     # Twitter hashtags:
+     r"""(?:\#+[\w_]+[\w\'_\-]*[\w_]+)""",
+     # email addresses
+     r"""[\w.+-]+@[\w-]+\.(?:[\w-]\.?)+[\w-]""",
+     # docstyle-ignore
+     # Remaining word types:
+     r"""
+     (?:[^\W\d_](?:[^\W\d_]|['\-_])+[^\W\d_])  # Words with apostrophes or dashes.
+     |
+     (?:[+\-]?\d+[,/.:-]\d+[+\-]?)  # Numbers, including fractions, decimals.
+     |
+     (?:[\w_]+)  # Words without apostrophes or dashes.
+     |
+     (?:\.(?:\s*\.){1,})  # Ellipsis dots.
+     |
+     (?:\S)  # Everything else that isn't whitespace.
+     """,
+ )
+
+ ######################################################################
+ # This is the core tokenizing regex:
+
+ WORD_RE = regex.compile(r"""(%s)""" % "|".join(REGEXPS), regex.VERBOSE | regex.I | regex.UNICODE)
+
+ # WORD_RE performs poorly on these patterns:
+ HANG_RE = regex.compile(r"([^a-zA-Z0-9])\1{3,}")
+
+ # The emoticon string gets its own regex so that we can preserve case for
+ # them as needed:
+ EMOTICON_RE = regex.compile(EMOTICONS, regex.VERBOSE | regex.I | regex.UNICODE)
+
+ # These are for regularizing HTML entities to Unicode:
+ ENT_RE = regex.compile(r"&(#?(x?))([^&;\s]+);")
+
+
+ ######################################################################
+ # Functions for converting html entities
+ ######################################################################
+
+
+ def _str_to_unicode(text, encoding=None, errors="strict"):
+     if encoding is None:
+         encoding = "utf-8"
+     if isinstance(text, bytes):
+         return text.decode(encoding, errors)
+     return text
+
+
+ def _replace_html_entities(text, keep=(), remove_illegal=True, encoding="utf-8"):
+     """
+     Remove entities from text by converting them to their corresponding unicode character.
+
+     Args:
+         text:
+             A unicode string or a byte string encoded in the given *encoding* (which defaults to 'utf-8').
+         keep (list):
+             List of entity names which should not be replaced. This supports both numeric entities (`&#nnnn;` and
+             `&#hhhh;`) and named entities (such as `&nbsp;` or `&gt;`).
+         remove_illegal (bool):
+             If `True`, entities that can't be converted are removed. Otherwise, entities that can't be converted are
+             kept "as is".
+
+     Returns: A unicode string with the entities removed.
+
+     See https://github.com/scrapy/w3lib/blob/master/w3lib/html.py
+
+     Examples:
+
+     ```python
+     >>> from nltk.tokenize.casual import _replace_html_entities
+
+     >>> _replace_html_entities(b"Price: &pound;100")
+     'Price: \\xa3100'
+
+     >>> print(_replace_html_entities(b"Price: &pound;100"))
+     Price: £100
+     ```"""
+
+     def _convert_entity(match):
+         entity_body = match.group(3)
+         if match.group(1):
+             try:
+                 if match.group(2):
+                     number = int(entity_body, 16)
+                 else:
+                     number = int(entity_body, 10)
+                 # Numeric character references in the 80-9F range are typically
+                 # interpreted by browsers as representing the characters mapped
+                 # to bytes 80-9F in the Windows-1252 encoding. For more info
+                 # see: https://en.wikipedia.org/wiki/ISO/IEC_8859-1#Similar_character_sets
+                 if 0x80 <= number <= 0x9F:
+                     return bytes((number,)).decode("cp1252")
+             except ValueError:
+                 number = None
+         else:
+             if entity_body in keep:
+                 return match.group(0)
+             else:
+                 number = html.entities.name2codepoint.get(entity_body)
+         if number is not None:
+             try:
+                 return chr(number)
+             except (ValueError, OverflowError):
+                 pass
+
+         return "" if remove_illegal else match.group(0)
+
+     return ENT_RE.sub(_convert_entity, _str_to_unicode(text, encoding))
+
+
+ ######################################################################
+
+
+ class TweetTokenizer:
+     r"""
+     Examples:
+
+     ```python
+     >>> # Tokenizer for tweets.
+     >>> from nltk.tokenize import TweetTokenizer
+
+     >>> tknzr = TweetTokenizer()
+     >>> s0 = "This is a cooool #dummysmiley: :-) :-P <3 and some arrows < > -> <--"
+     >>> tknzr.tokenize(s0)
+     ['This', 'is', 'a', 'cooool', '#dummysmiley', ':', ':-)', ':-P', '<3', 'and', 'some', 'arrows', '<', '>', '->', '<--']
+
+     >>> # Examples using *strip_handles* and *reduce_len parameters*:
+     >>> tknzr = TweetTokenizer(strip_handles=True, reduce_len=True)
+     >>> s1 = "@remy: This is waaaaayyyy too much for you!!!!!!"
+     >>> tknzr.tokenize(s1)
+     [':', 'This', 'is', 'waaayyy', 'too', 'much', 'for', 'you', '!', '!', '!']
+     ```"""
+
+     def __init__(self, preserve_case=True, reduce_len=False, strip_handles=False):
+         self.preserve_case = preserve_case
+         self.reduce_len = reduce_len
+         self.strip_handles = strip_handles
+
+     def tokenize(self, text):
+         """
+         Args:
+             text: str
+
+         Returns: list(str) A tokenized list of strings; concatenating this list returns the original string if
+         `preserve_case=False`
+         """
+         # Fix HTML character entities:
+         text = _replace_html_entities(text)
+         # Remove username handles
+         if self.strip_handles:
+             text = remove_handles(text)
+         # Normalize word lengthening
+         if self.reduce_len:
+             text = reduce_lengthening(text)
+         # Shorten problematic sequences of characters
+         safe_text = HANG_RE.sub(r"\1\1\1", text)
+         # Tokenize:
+         words = WORD_RE.findall(safe_text)
+         # Possibly alter the case, but avoid changing emoticons like :D into :d:
+         if not self.preserve_case:
+             words = [x if EMOTICON_RE.search(x) else x.lower() for x in words]
+         return words
+
+
+ ######################################################################
+ # Normalization Functions
+ ######################################################################
+
+
+ def reduce_lengthening(text):
+     """
+     Replace repeated character sequences of length 3 or greater with sequences of length 3.
+     """
+     pattern = regex.compile(r"(.)\1{2,}")
+     return pattern.sub(r"\1\1\1", text)
+
+
+ def remove_handles(text):
+     """
+     Remove Twitter username handles from text.
+     """
+     pattern = regex.compile(
+         r"(?<![A-Za-z0-9_!@#\$%&*])@(([A-Za-z0-9_]){20}(?!@))|(?<![A-Za-z0-9_!@#\$%&*])@(([A-Za-z0-9_]){1,19})(?![A-Za-z0-9_]*@)"
+     )
+     # Substitute handles with ' ' to ensure that text on either side of removed handles is tokenized correctly
+     return pattern.sub(" ", text)
+
+
+ ######################################################################
+ # Tokenization Function
+ ######################################################################
+
+
+ def casual_tokenize(text, preserve_case=True, reduce_len=False, strip_handles=False):
+     """
+     Convenience function for wrapping the tokenizer.
+     """
+     return TweetTokenizer(preserve_case=preserve_case, reduce_len=reduce_len, strip_handles=strip_handles).tokenize(
+         text
+     )
+
+
+ ###############################################################################
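A quick usage sketch for the tokenizer above. It assumes `transformers` and the optional `emoji` package are installed and that the `vinai/bertweet-base` checkpoint can be downloaded; the printed tokens are illustrative, not verified output:

```python
# With normalization=True, normalizeTweet() rewrites @handles -> @USER and
# URLs -> HTTPURL before BPE is applied, as implemented in the file above.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("vinai/bertweet-base", normalization=True)
print(tok.tokenize("@remy check https://example.com :-)"))
ids = tok("@remy check https://example.com :-)")["input_ids"]
print(tok.decode(ids))  # round-trips through the <s> ... </s> format documented above
```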
venv/lib/python3.10/site-packages/transformers/models/ernie_m/__init__.py ADDED
@@ -0,0 +1,82 @@
+ # Copyright 2023 The HuggingFace and Baidu Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ from typing import TYPE_CHECKING
+
+ # rely on isort to merge the imports
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_torch_available
+
+
+ _import_structure = {
+     "configuration_ernie_m": ["ERNIE_M_PRETRAINED_CONFIG_ARCHIVE_MAP", "ErnieMConfig"],
+ }
+
+ try:
+     if not is_sentencepiece_available():
+         raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+     pass
+ else:
+     _import_structure["tokenization_ernie_m"] = ["ErnieMTokenizer"]
+
+ try:
+     if not is_torch_available():
+         raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+     pass
+ else:
+     _import_structure["modeling_ernie_m"] = [
+         "ERNIE_M_PRETRAINED_MODEL_ARCHIVE_LIST",
+         "ErnieMForMultipleChoice",
+         "ErnieMForQuestionAnswering",
+         "ErnieMForSequenceClassification",
+         "ErnieMForTokenClassification",
+         "ErnieMModel",
+         "ErnieMPreTrainedModel",
+         "ErnieMForInformationExtraction",
+     ]
+
+
+ if TYPE_CHECKING:
+     from .configuration_ernie_m import ERNIE_M_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieMConfig
+
+     try:
+         if not is_sentencepiece_available():
+             raise OptionalDependencyNotAvailable()
+     except OptionalDependencyNotAvailable:
+         pass
+     else:
+         from .tokenization_ernie_m import ErnieMTokenizer
+
+     try:
+         if not is_torch_available():
+             raise OptionalDependencyNotAvailable()
+     except OptionalDependencyNotAvailable:
+         pass
+     else:
+         from .modeling_ernie_m import (
+             ERNIE_M_PRETRAINED_MODEL_ARCHIVE_LIST,
+             ErnieMForInformationExtraction,
+             ErnieMForMultipleChoice,
+             ErnieMForQuestionAnswering,
+             ErnieMForSequenceClassification,
+             ErnieMForTokenClassification,
+             ErnieMModel,
+             ErnieMPreTrainedModel,
+         )
+
+
+ else:
+     import sys
+
+     sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
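Unlike the BERTweet package above, this `__init__.py` gates its symbols behind sentencepiece/torch availability probes: a missing extra silently prunes names from `_import_structure` rather than failing at import time, so the error surfaces only when the pruned symbol is actually requested. A condensed sketch of that guard pattern, with a stand-in for transformers' `is_sentencepiece_available`:

```python
# A condensed sketch of the optional-dependency guard used above (illustrative).
import importlib.util

class OptionalDependencyNotAvailable(Exception):
    pass

def is_available(pkg: str) -> bool:
    # Stand-in probe: True if the package can be found without importing it.
    return importlib.util.find_spec(pkg) is not None

_import_structure = {"configuration": ["Config"]}

try:
    if not is_available("sentencepiece"):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass  # tokenizer names are simply not registered
else:
    _import_structure["tokenization"] = ["Tokenizer"]
```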
venv/lib/python3.10/site-packages/transformers/models/ernie_m/__pycache__/configuration_ernie_m.cpython-310.pyc ADDED
Binary file (5.25 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/ernie_m/configuration_ernie_m.py ADDED
@@ -0,0 +1,112 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 Xuan Ouyang, Shuohuan Wang, Chao Pang, Yu Sun, Hao Tian, Hua Wu, Haifeng Wang and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ ErnieM model configuration"""
16
+ # Adapted from the original paddlenlp repository (https://github.com/PaddlePaddle/PaddleNLP/blob/develop/paddlenlp/transformers/ernie_m/configuration.py).
17
+
18
+ from __future__ import annotations
19
+
20
+ from typing import Dict
21
+
22
+ from ...configuration_utils import PretrainedConfig
23
+ from ..deprecated._archive_maps import ERNIE_M_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
24
+
25
+
26
+ class ErnieMConfig(PretrainedConfig):
27
+ r"""
28
+ This is the configuration class to store the configuration of an [`ErnieMModel`]. It is used to instantiate an
29
+ Ernie-M model according to the specified arguments, defining the model architecture. Instantiating a configuration
30
+ with the defaults will yield a similar configuration to that of the `Ernie-M`
31
+ [susnato/ernie-m-base_pytorch](https://huggingface.co/susnato/ernie-m-base_pytorch) architecture.
32
+
33
+
34
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
35
+ documentation from [`PretrainedConfig`] for more information.
36
+
37
+ Args:
38
+ vocab_size (`int`, *optional*, defaults to 250002):
39
+ Vocabulary size of `inputs_ids` in [`ErnieMModel`]. Also is the vocab size of token embedding matrix.
40
+ Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling
41
+ [`ErnieMModel`].
42
+ hidden_size (`int`, *optional*, defaults to 768):
43
+ Dimensionality of the embedding layer, encoder layers and pooler layer.
44
+ num_hidden_layers (`int`, *optional*, defaults to 12):
45
+ Number of hidden layers in the Transformer encoder.
46
+ num_attention_heads (`int`, *optional*, defaults to 12):
47
+ Number of attention heads for each attention layer in the Transformer encoder.
48
+ intermediate_size (`int`, *optional*, defaults to 3072):
49
+ Dimensionality of the feed-forward (ff) layer in the encoder. Input tensors to feed-forward layers are
50
+ firstly projected from hidden_size to intermediate_size, and then projected back to hidden_size. Typically
51
+ intermediate_size is larger than hidden_size.
52
+ hidden_act (`str`, *optional*, defaults to `"gelu"`):
53
+ The non-linear activation function in the feed-forward layer. `"gelu"`, `"relu"` and any other
54
+ torch-supported activation function can be used.
55
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
56
+ The dropout probability for all fully connected layers in the embeddings and encoder.
57
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
58
+ The dropout probability used in `MultiHeadAttention` in all encoder layers to drop some attention targets.
59
+ max_position_embeddings (`int`, *optional*, defaults to 514):
60
+ The maximum value of the dimensionality of position encoding, which dictates the maximum supported length
61
+ of an input sequence.
62
+ initializer_range (`float`, *optional*, defaults to 0.02):
63
+ The standard deviation of the normal initializer for initializing all weight matrices.
65
+ pad_token_id (`int`, *optional*, defaults to 1):
66
+ Padding token id.
67
+ layer_norm_eps (`float`, *optional*, defaults to 1e-05):
68
+ The epsilon used by the layer normalization layers.
69
+ classifier_dropout (`float`, *optional*):
70
+ The dropout ratio for the classification head.
71
+ act_dropout (`float`, *optional*, defaults to 0.0):
72
+ This dropout probability is used in `ErnieMEncoderLayer` after activation.
73
+
74
+ A normal_initializer initializes weight matrices as normal distributions. See
75
+ `ErnieMPretrainedModel._init_weights()` for how weights are initialized in `ErnieMModel`.
76
+ """
77
+
78
+ model_type = "ernie_m"
79
+ attribute_map: Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}
80
+
81
+ def __init__(
82
+ self,
83
+ vocab_size: int = 250002,
84
+ hidden_size: int = 768,
85
+ num_hidden_layers: int = 12,
86
+ num_attention_heads: int = 12,
87
+ intermediate_size: int = 3072,
88
+ hidden_act: str = "gelu",
89
+ hidden_dropout_prob: float = 0.1,
90
+ attention_probs_dropout_prob: float = 0.1,
91
+ max_position_embeddings: int = 514,
92
+ initializer_range: float = 0.02,
93
+ pad_token_id: int = 1,
94
+ layer_norm_eps: float = 1e-05,
95
+ classifier_dropout=None,
96
+ act_dropout=0.0,
97
+ **kwargs,
98
+ ):
99
+ super().__init__(pad_token_id=pad_token_id, **kwargs)
100
+ self.vocab_size = vocab_size
101
+ self.hidden_size = hidden_size
102
+ self.num_hidden_layers = num_hidden_layers
103
+ self.num_attention_heads = num_attention_heads
104
+ self.intermediate_size = intermediate_size
105
+ self.hidden_act = hidden_act
106
+ self.hidden_dropout_prob = hidden_dropout_prob
107
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
108
+ self.max_position_embeddings = max_position_embeddings
109
+ self.initializer_range = initializer_range
110
+ self.layer_norm_eps = layer_norm_eps
111
+ self.classifier_dropout = classifier_dropout
112
+ self.act_dropout = act_dropout
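A quick usage sketch for the configuration class added above (assumes a `transformers` build that still ships ErnieM; the values mirror the defaults defined in this file):

```python
from transformers import ErnieMConfig, ErnieMModel

# Defaults match susnato/ernie-m-base_pytorch: 250002 vocab, 768 hidden, 12 layers/heads.
config = ErnieMConfig()

# Override a few fields for a smaller randomly initialized model (no download involved).
small_config = ErnieMConfig(hidden_size=384, num_hidden_layers=6, num_attention_heads=6)
model = ErnieMModel(small_config)
print(model.config.hidden_size)  # 384
```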
venv/lib/python3.10/site-packages/transformers/models/ernie_m/modeling_ernie_m.py ADDED
@@ -0,0 +1,1058 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 Xuan Ouyang, Shuohuan Wang, Chao Pang, Yu Sun, Hao Tian, Hua Wu, Haifeng Wang and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch ErnieM model."""
16
+
17
+
18
+ import math
19
+ from typing import List, Optional, Tuple, Union
20
+
21
+ import torch
22
+ import torch.utils.checkpoint
23
+ from torch import nn
24
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
25
+
26
+ from ...activations import ACT2FN
27
+ from ...modeling_outputs import (
28
+ BaseModelOutputWithPastAndCrossAttentions,
29
+ BaseModelOutputWithPoolingAndCrossAttentions,
30
+ MultipleChoiceModelOutput,
31
+ QuestionAnsweringModelOutput,
32
+ SequenceClassifierOutput,
33
+ TokenClassifierOutput,
34
+ )
35
+ from ...modeling_utils import PreTrainedModel
36
+ from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer
37
+ from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
38
+ from .configuration_ernie_m import ErnieMConfig
39
+
40
+
41
+ logger = logging.get_logger(__name__)
42
+
43
+ _CHECKPOINT_FOR_DOC = "susnato/ernie-m-base_pytorch"
44
+ _CONFIG_FOR_DOC = "ErnieMConfig"
45
+ _TOKENIZER_FOR_DOC = "ErnieMTokenizer"
46
+
47
+
48
+ from ..deprecated._archive_maps import ERNIE_M_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
49
+
50
+
51
+ # Adapted from paddlenlp.transformers.ernie_m.modeling.ErnieEmbeddings
52
+ class ErnieMEmbeddings(nn.Module):
53
+ """Construct the embeddings from word and position embeddings."""
54
+
55
+ def __init__(self, config):
56
+ super().__init__()
57
+ self.hidden_size = config.hidden_size
58
+ self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
59
+ self.position_embeddings = nn.Embedding(
60
+ config.max_position_embeddings, config.hidden_size, padding_idx=config.pad_token_id
61
+ )
62
+ self.layer_norm = nn.LayerNorm(normalized_shape=config.hidden_size, eps=config.layer_norm_eps)
63
+ self.dropout = nn.Dropout(p=config.hidden_dropout_prob)
64
+ self.padding_idx = config.pad_token_id
65
+
66
+ def forward(
67
+ self,
68
+ input_ids: Optional[torch.LongTensor] = None,
69
+ position_ids: Optional[torch.LongTensor] = None,
70
+ inputs_embeds: Optional[torch.LongTensor] = None,
71
+ past_key_values_length: int = 0,
72
+ ) -> torch.Tensor:
73
+ if inputs_embeds is None:
74
+ inputs_embeds = self.word_embeddings(input_ids)
75
+ if position_ids is None:
76
+ input_shape = inputs_embeds.size()[:-1]
77
+ ones = torch.ones(input_shape, dtype=torch.int64, device=inputs_embeds.device)
78
+ seq_length = torch.cumsum(ones, dim=1)
79
+ position_ids = seq_length - ones
80
+
81
+ if past_key_values_length > 0:
82
+ position_ids = position_ids + past_key_values_length
83
+ # to mimic paddlenlp implementation
84
+ position_ids += 2
85
+ position_embeddings = self.position_embeddings(position_ids)
86
+ embeddings = inputs_embeds + position_embeddings
87
+ embeddings = self.layer_norm(embeddings)
88
+ embeddings = self.dropout(embeddings)
89
+
90
+ return embeddings
91
+
92
+
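The cumsum-based position ids and the `position_ids += 2` offset in `ErnieMEmbeddings.forward` above reproduce the paddlenlp checkpoint layout, in which the first two position slots are reserved (which is also why `max_position_embeddings` defaults to 514 rather than 512, similar to RoBERTa-style models). A quick sketch of the resulting ids, assuming no cached past key values:

```python
import torch

ones = torch.ones((2, 4), dtype=torch.int64)       # batch of 2, sequence length 4
position_ids = torch.cumsum(ones, dim=1) - ones    # tensor([[0, 1, 2, 3], [0, 1, 2, 3]])
position_ids += 2                                  # tensor([[2, 3, 4, 5], [2, 3, 4, 5]])
```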
93
+ # Copied from transformers.models.bert.modeling_bert.BertSelfAttention with Bert->ErnieM,self.value->self.v_proj,self.key->self.k_proj,self.query->self.q_proj
94
+ class ErnieMSelfAttention(nn.Module):
95
+ def __init__(self, config, position_embedding_type=None):
96
+ super().__init__()
97
+ if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
98
+ raise ValueError(
99
+ f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
100
+ f"heads ({config.num_attention_heads})"
101
+ )
102
+
103
+ self.num_attention_heads = config.num_attention_heads
104
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
105
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
106
+
107
+ self.q_proj = nn.Linear(config.hidden_size, self.all_head_size)
108
+ self.k_proj = nn.Linear(config.hidden_size, self.all_head_size)
109
+ self.v_proj = nn.Linear(config.hidden_size, self.all_head_size)
110
+
111
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
112
+ self.position_embedding_type = position_embedding_type or getattr(
113
+ config, "position_embedding_type", "absolute"
114
+ )
115
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
116
+ self.max_position_embeddings = config.max_position_embeddings
117
+ self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
118
+
119
+ self.is_decoder = config.is_decoder
120
+
121
+ def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor:
122
+ new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
123
+ x = x.view(new_x_shape)
124
+ return x.permute(0, 2, 1, 3)
125
+
126
+ def forward(
127
+ self,
128
+ hidden_states: torch.Tensor,
129
+ attention_mask: Optional[torch.FloatTensor] = None,
130
+ head_mask: Optional[torch.FloatTensor] = None,
131
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
132
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
133
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
134
+ output_attentions: Optional[bool] = False,
135
+ ) -> Tuple[torch.Tensor]:
136
+ mixed_query_layer = self.q_proj(hidden_states)
137
+
138
+ # If this is instantiated as a cross-attention module, the keys
139
+ # and values come from an encoder; the attention mask needs to be
140
+ # such that the encoder's padding tokens are not attended to.
141
+ is_cross_attention = encoder_hidden_states is not None
142
+
143
+ if is_cross_attention and past_key_value is not None:
144
+ # reuse cached k, v for cross-attention
145
+ key_layer = past_key_value[0]
146
+ value_layer = past_key_value[1]
147
+ attention_mask = encoder_attention_mask
148
+ elif is_cross_attention:
149
+ key_layer = self.transpose_for_scores(self.k_proj(encoder_hidden_states))
150
+ value_layer = self.transpose_for_scores(self.v_proj(encoder_hidden_states))
151
+ attention_mask = encoder_attention_mask
152
+ elif past_key_value is not None:
153
+ key_layer = self.transpose_for_scores(self.k_proj(hidden_states))
154
+ value_layer = self.transpose_for_scores(self.v_proj(hidden_states))
155
+ key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
156
+ value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
157
+ else:
158
+ key_layer = self.transpose_for_scores(self.k_proj(hidden_states))
159
+ value_layer = self.transpose_for_scores(self.v_proj(hidden_states))
160
+
161
+ query_layer = self.transpose_for_scores(mixed_query_layer)
162
+
163
+ use_cache = past_key_value is not None
164
+ if self.is_decoder:
165
+ # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
166
+ # Further calls to cross_attention layer can then reuse all cross-attention
167
+ # key/value_states (first "if" case)
168
+ # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
169
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
170
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
171
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
172
+ past_key_value = (key_layer, value_layer)
173
+
174
+ # Take the dot product between "query" and "key" to get the raw attention scores.
175
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
176
+
177
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
178
+ query_length, key_length = query_layer.shape[2], key_layer.shape[2]
179
+ if use_cache:
180
+ position_ids_l = torch.tensor(key_length - 1, dtype=torch.long, device=hidden_states.device).view(
181
+ -1, 1
182
+ )
183
+ else:
184
+ position_ids_l = torch.arange(query_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
185
+ position_ids_r = torch.arange(key_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
186
+ distance = position_ids_l - position_ids_r
187
+
188
+ positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
189
+ positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility
190
+
191
+ if self.position_embedding_type == "relative_key":
192
+ relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
193
+ attention_scores = attention_scores + relative_position_scores
194
+ elif self.position_embedding_type == "relative_key_query":
195
+ relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
196
+ relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
197
+ attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
198
+
199
+ attention_scores = attention_scores / math.sqrt(self.attention_head_size)
200
+ if attention_mask is not None:
201
+ # Apply the attention mask (precomputed for all layers in the ErnieMModel forward() function)
202
+ attention_scores = attention_scores + attention_mask
203
+
204
+ # Normalize the attention scores to probabilities.
205
+ attention_probs = nn.functional.softmax(attention_scores, dim=-1)
206
+
207
+ # This is actually dropping out entire tokens to attend to, which might
208
+ # seem a bit unusual, but is taken from the original Transformer paper.
209
+ attention_probs = self.dropout(attention_probs)
210
+
211
+ # Mask heads if we want to
212
+ if head_mask is not None:
213
+ attention_probs = attention_probs * head_mask
214
+
215
+ context_layer = torch.matmul(attention_probs, value_layer)
216
+
217
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
218
+ new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
219
+ context_layer = context_layer.view(new_context_layer_shape)
220
+
221
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
222
+
223
+ if self.is_decoder:
224
+ outputs = outputs + (past_key_value,)
225
+ return outputs
226
+
227
+
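`transpose_for_scores` above is the standard multi-head reshape; a quick shape check, assuming the default 12 heads with head size 64 (hidden size 768):

```python
import torch

batch, seq, num_heads, head_size = 2, 5, 12, 64
x = torch.randn(batch, seq, num_heads * head_size)

# (batch, seq, hidden) -> (batch, heads, seq, head_size)
x = x.view(batch, seq, num_heads, head_size).permute(0, 2, 1, 3)
print(x.shape)  # torch.Size([2, 12, 5, 64])

# Per-head scaled dot-product scores: (batch, heads, seq, seq)
scores = torch.matmul(x, x.transpose(-1, -2)) / (head_size ** 0.5)
print(scores.shape)  # torch.Size([2, 12, 5, 5])
```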
228
+ class ErnieMAttention(nn.Module):
229
+ def __init__(self, config, position_embedding_type=None):
230
+ super().__init__()
231
+ self.self_attn = ErnieMSelfAttention(config, position_embedding_type=position_embedding_type)
232
+ self.out_proj = nn.Linear(config.hidden_size, config.hidden_size)
233
+ self.pruned_heads = set()
234
+
235
+ def prune_heads(self, heads):
236
+ if len(heads) == 0:
237
+ return
238
+ heads, index = find_pruneable_heads_and_indices(
239
+ heads, self.self_attn.num_attention_heads, self.self_attn.attention_head_size, self.pruned_heads
240
+ )
241
+
242
+ # Prune linear layers
243
+ self.self_attn.q_proj = prune_linear_layer(self.self_attn.q_proj, index)
244
+ self.self_attn.k_proj = prune_linear_layer(self.self_attn.k_proj, index)
245
+ self.self_attn.v_proj = prune_linear_layer(self.self_attn.v_proj, index)
246
+ self.out_proj = prune_linear_layer(self.out_proj, index, dim=1)
247
+
248
+ # Update hyper params and store pruned heads
249
+ self.self_attn.num_attention_heads = self.self_attn.num_attention_heads - len(heads)
250
+ self.self_attn.all_head_size = self.self_attn.attention_head_size * self.self_attn.num_attention_heads
251
+ self.pruned_heads = self.pruned_heads.union(heads)
252
+
253
+ def forward(
254
+ self,
255
+ hidden_states: torch.Tensor,
256
+ attention_mask: Optional[torch.FloatTensor] = None,
257
+ head_mask: Optional[torch.FloatTensor] = None,
258
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
259
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
260
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
261
+ output_attentions: Optional[bool] = False,
262
+ ) -> Tuple[torch.Tensor]:
263
+ self_outputs = self.self_attn(
264
+ hidden_states,
265
+ attention_mask,
266
+ head_mask,
267
+ encoder_hidden_states,
268
+ encoder_attention_mask,
269
+ past_key_value,
270
+ output_attentions,
271
+ )
272
+ attention_output = self.out_proj(self_outputs[0])
273
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
274
+ return outputs
275
+
276
+
277
+ class ErnieMEncoderLayer(nn.Module):
278
+ def __init__(self, config):
279
+ super().__init__()
280
+ # to mimic paddlenlp implementation
281
+ dropout = 0.1 if config.hidden_dropout_prob is None else config.hidden_dropout_prob
282
+ act_dropout = config.hidden_dropout_prob if config.act_dropout is None else config.act_dropout
283
+
284
+ self.self_attn = ErnieMAttention(config)
285
+ self.linear1 = nn.Linear(config.hidden_size, config.intermediate_size)
286
+ self.dropout = nn.Dropout(act_dropout)
287
+ self.linear2 = nn.Linear(config.intermediate_size, config.hidden_size)
288
+ self.norm1 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
289
+ self.norm2 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
290
+ self.dropout1 = nn.Dropout(dropout)
291
+ self.dropout2 = nn.Dropout(dropout)
292
+ if isinstance(config.hidden_act, str):
293
+ self.activation = ACT2FN[config.hidden_act]
294
+ else:
295
+ self.activation = config.hidden_act
296
+
297
+ def forward(
298
+ self,
299
+ hidden_states: torch.Tensor,
300
+ attention_mask: Optional[torch.FloatTensor] = None,
301
+ head_mask: Optional[torch.FloatTensor] = None,
302
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
303
+ output_attentions: Optional[bool] = True,
304
+ ):
305
+ residual = hidden_states
306
+ if output_attentions:
307
+ hidden_states, attention_opt_weights = self.self_attn(
308
+ hidden_states=hidden_states,
309
+ attention_mask=attention_mask,
310
+ head_mask=head_mask,
311
+ past_key_value=past_key_value,
312
+ output_attentions=output_attentions,
313
+ )
314
+
315
+ else:
316
+ hidden_states = self.self_attn(
317
+ hidden_states=hidden_states,
318
+ attention_mask=attention_mask,
319
+ head_mask=head_mask,
320
+ past_key_value=past_key_value,
321
+ output_attentions=output_attentions,
322
+ )
323
+ hidden_states = residual + self.dropout1(hidden_states)
324
+ hidden_states = self.norm1(hidden_states)
325
+ residual = hidden_states
326
+
327
+ hidden_states = self.linear1(hidden_states)
328
+ hidden_states = self.activation(hidden_states)
329
+ hidden_states = self.dropout(hidden_states)
330
+ hidden_states = self.linear2(hidden_states)
331
+ hidden_states = residual + self.dropout2(hidden_states)
332
+ hidden_states = self.norm2(hidden_states)
333
+
334
+ if output_attentions:
335
+ return hidden_states, attention_opt_weights
336
+ else:
337
+ return hidden_states
338
+
339
+
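Structurally, `ErnieMEncoderLayer` above is a post-LayerNorm transformer block: the attention and feed-forward sub-blocks each add a residual connection and then normalize. A minimal data-flow sketch (self-attention replaced by an identity and one norm reused, just to make the flow visible; the real layer keeps separate `norm1`/`norm2` and dropouts):

```python
import torch
from torch import nn

hidden = torch.randn(2, 4, 8)                                       # (batch, seq, hidden)
norm = nn.LayerNorm(8)
ffn = nn.Sequential(nn.Linear(8, 32), nn.GELU(), nn.Linear(32, 8))  # linear1 / act / linear2

attn_out = hidden                       # stand-in for self_attn(hidden_states=hidden, ...)
hidden = norm(hidden + attn_out)        # residual + norm1
hidden = norm(hidden + ffn(hidden))     # residual + norm2
print(hidden.shape)                     # torch.Size([2, 4, 8])
```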
340
+ class ErnieMEncoder(nn.Module):
341
+ def __init__(self, config):
342
+ super().__init__()
343
+ self.config = config
344
+ self.layers = nn.ModuleList([ErnieMEncoderLayer(config) for _ in range(config.num_hidden_layers)])
345
+
346
+ def forward(
347
+ self,
348
+ input_embeds: torch.Tensor,
349
+ attention_mask: Optional[torch.FloatTensor] = None,
350
+ head_mask: Optional[torch.FloatTensor] = None,
351
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
352
+ output_attentions: Optional[bool] = False,
353
+ output_hidden_states: Optional[bool] = False,
354
+ return_dict: Optional[bool] = True,
355
+ ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]:
356
+ hidden_states = () if output_hidden_states else None
357
+ attentions = () if output_attentions else None
358
+
359
+ output = input_embeds
360
+ if output_hidden_states:
361
+ hidden_states = hidden_states + (output,)
362
+ for i, layer in enumerate(self.layers):
363
+ layer_head_mask = head_mask[i] if head_mask is not None else None
364
+ past_key_value = past_key_values[i] if past_key_values is not None else None
365
+
366
+ output, opt_attn_weights = layer(
367
+ hidden_states=output,
368
+ attention_mask=attention_mask,
369
+ head_mask=layer_head_mask,
370
+ past_key_value=past_key_value,
371
+ )
372
+
373
+ if output_hidden_states:
374
+ hidden_states = hidden_states + (output,)
375
+ if output_attentions:
376
+ attentions = attentions + (opt_attn_weights,)
377
+
378
+ last_hidden_state = output
379
+ if not return_dict:
380
+ return tuple(v for v in [last_hidden_state, hidden_states, attentions] if v is not None)
381
+
382
+ return BaseModelOutputWithPastAndCrossAttentions(
383
+ last_hidden_state=last_hidden_state, hidden_states=hidden_states, attentions=attentions
384
+ )
385
+
386
+
387
+ # Copied from transformers.models.bert.modeling_bert.BertPooler with Bert->ErnieM
388
+ class ErnieMPooler(nn.Module):
389
+ def __init__(self, config):
390
+ super().__init__()
391
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
392
+ self.activation = nn.Tanh()
393
+
394
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
395
+ # We "pool" the model by simply taking the hidden state corresponding
396
+ # to the first token.
397
+ first_token_tensor = hidden_states[:, 0]
398
+ pooled_output = self.dense(first_token_tensor)
399
+ pooled_output = self.activation(pooled_output)
400
+ return pooled_output
401
+
402
+
403
+ class ErnieMPreTrainedModel(PreTrainedModel):
404
+ """
405
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
406
+ models.
407
+ """
408
+
409
+ config_class = ErnieMConfig
410
+ base_model_prefix = "ernie_m"
411
+
412
+ def _init_weights(self, module):
413
+ """Initialize the weights"""
414
+ if isinstance(module, nn.Linear):
415
+ # Slightly different from the TF version which uses truncated_normal for initialization
416
+ # cf https://github.com/pytorch/pytorch/pull/5617
417
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
418
+ if module.bias is not None:
419
+ module.bias.data.zero_()
420
+ elif isinstance(module, nn.Embedding):
421
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
422
+ if module.padding_idx is not None:
423
+ module.weight.data[module.padding_idx].zero_()
424
+ elif isinstance(module, nn.LayerNorm):
425
+ module.bias.data.zero_()
426
+ module.weight.data.fill_(1.0)
427
+
428
+
429
+ ERNIE_M_START_DOCSTRING = r"""
430
+
431
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
432
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
433
+ etc.)
434
+
435
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
436
+ it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and
437
+ behavior.
438
+
439
+ Parameters:
440
+ config ([`ErnieMConfig`]): Model configuration class with all the parameters of the model.
441
+ Initializing with a config file does not load the weights associated with the model, only the
442
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
443
+ """
444
+
445
+ ERNIE_M_INPUTS_DOCSTRING = r"""
446
+ Args:
447
+ input_ids (`torch.LongTensor` of shape `({0})`):
448
+ Indices of input sequence tokens in the vocabulary.
449
+
450
+ Indices can be obtained using [`ErnieMTokenizer`]. See [`PreTrainedTokenizer.encode`] and
451
+ [`PreTrainedTokenizer.__call__`] for details.
452
+
453
+ [What are input IDs?](../glossary#input-ids)
454
+ attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
455
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
456
+
457
+ - 1 for tokens that are **not masked**,
458
+ - 0 for tokens that are **masked**.
459
+
460
+ [What are attention masks?](../glossary#attention-mask)
461
+ position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
462
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
463
+ config.max_position_embeddings - 1]`.
464
+
465
+ [What are position IDs?](../glossary#position-ids)
466
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
467
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
468
+
469
+ - 1 indicates the head is **not masked**,
470
+ - 0 indicates the head is **masked**.
471
+
472
+ inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
473
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
474
+ is useful if you want more control over how to convert *input_ids* indices into associated vectors than the
475
+ model's internal embedding lookup matrix.
476
+ output_attentions (`bool`, *optional*):
477
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
478
+ tensors for more detail.
479
+ output_hidden_states (`bool`, *optional*):
480
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
481
+ more detail.
482
+ return_dict (`bool`, *optional*):
483
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
484
+ """
485
+
486
+
487
+ @add_start_docstrings(
488
+ "The bare ErnieM Model transformer outputting raw hidden-states without any specific head on top.",
489
+ ERNIE_M_START_DOCSTRING,
490
+ )
491
+ class ErnieMModel(ErnieMPreTrainedModel):
492
+ def __init__(self, config, add_pooling_layer=True):
493
+ super().__init__(config)
494
+ self.initializer_range = config.initializer_range
495
+ self.embeddings = ErnieMEmbeddings(config)
496
+ self.encoder = ErnieMEncoder(config)
497
+ self.pooler = ErnieMPooler(config) if add_pooling_layer else None
498
+ self.post_init()
499
+
500
+ def get_input_embeddings(self):
501
+ return self.embeddings.word_embeddings
502
+
503
+ def set_input_embeddings(self, value):
504
+ self.embeddings.word_embeddings = value
505
+
506
+ def _prune_heads(self, heads_to_prune):
507
+ """
508
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
509
+ class PreTrainedModel
510
+ """
511
+ for layer, heads in heads_to_prune.items():
512
+ self.encoder.layers[layer].self_attn.prune_heads(heads)
513
+
514
+ @add_start_docstrings_to_model_forward(ERNIE_M_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
515
+ @add_code_sample_docstrings(
516
+ processor_class=_TOKENIZER_FOR_DOC,
517
+ checkpoint=_CHECKPOINT_FOR_DOC,
518
+ output_type=BaseModelOutputWithPastAndCrossAttentions,
519
+ config_class=_CONFIG_FOR_DOC,
520
+ )
521
+ def forward(
522
+ self,
523
+ input_ids: Optional[torch.Tensor] = None,
524
+ position_ids: Optional[torch.Tensor] = None,
525
+ attention_mask: Optional[torch.Tensor] = None,
526
+ head_mask: Optional[torch.Tensor] = None,
527
+ inputs_embeds: Optional[torch.Tensor] = None,
528
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
529
+ use_cache: Optional[bool] = None,
530
+ output_hidden_states: Optional[bool] = None,
531
+ output_attentions: Optional[bool] = None,
532
+ return_dict: Optional[bool] = None,
533
+ ) -> Union[Tuple[torch.FloatTensor], BaseModelOutputWithPoolingAndCrossAttentions]:
534
+ if input_ids is not None and inputs_embeds is not None:
535
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time.")
536
+
537
+ # initialize the default boolean values from the config
538
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
539
+ output_hidden_states = (
540
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
541
+ )
542
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
543
+
544
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
545
+
546
+ past_key_values_length = 0
547
+ if past_key_values is not None:
548
+ past_key_values_length = past_key_values[0][0].shape[2]
549
+
550
+ # Adapted from paddlenlp.transformers.ernie_m.ErnieMModel
551
+ if attention_mask is None:
552
+ attention_mask = (input_ids == self.config.pad_token_id).to(torch.float32)
553
+ attention_mask *= torch.finfo(attention_mask.dtype).min
554
+ if past_key_values is not None:
555
+ batch_size = past_key_values[0][0].shape[0]
556
+ past_mask = torch.zeros([batch_size, 1, 1, past_key_values_length], dtype=attention_mask.dtype)
557
+ attention_mask = torch.concat([past_mask, attention_mask], dim=-1)
558
+ # For 2D attention_mask from tokenizer
559
+ elif attention_mask.ndim == 2:
560
+ attention_mask = attention_mask.to(torch.float32)
561
+ attention_mask = 1.0 - attention_mask
562
+ attention_mask *= torch.finfo(attention_mask.dtype).min
563
+
564
+ extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(1)
565
+
566
+ embedding_output = self.embeddings(
567
+ input_ids=input_ids,
568
+ position_ids=position_ids,
569
+ inputs_embeds=inputs_embeds,
570
+ past_key_values_length=past_key_values_length,
571
+ )
572
+ encoder_outputs = self.encoder(
573
+ embedding_output,
574
+ attention_mask=extended_attention_mask,
575
+ head_mask=head_mask,
576
+ past_key_values=past_key_values,
577
+ output_attentions=output_attentions,
578
+ output_hidden_states=output_hidden_states,
579
+ return_dict=return_dict,
580
+ )
581
+
582
+ if not return_dict:
583
+ sequence_output = encoder_outputs[0]
584
+ pooler_output = self.pooler(sequence_output) if self.pooler is not None else None
585
+ return (sequence_output, pooler_output) + encoder_outputs[1:]
586
+
587
+ sequence_output = encoder_outputs["last_hidden_state"]
588
+ pooler_output = self.pooler(sequence_output) if self.pooler is not None else None
589
+ hidden_states = None if not output_hidden_states else encoder_outputs["hidden_states"]
590
+ attentions = None if not output_attentions else encoder_outputs["attentions"]
591
+
592
+ return BaseModelOutputWithPoolingAndCrossAttentions(
593
+ last_hidden_state=sequence_output,
594
+ pooler_output=pooler_output,
595
+ hidden_states=hidden_states,
596
+ attentions=attentions,
597
+ )
598
+
599
+
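The mask handling in `ErnieMModel.forward` above converts a 2D tokenizer `attention_mask` (1 = real token, 0 = padding) into an additive bias whose padded positions hold the most negative representable float, so they receive effectively zero probability after softmax. A minimal sketch of that conversion:

```python
import torch

attention_mask = torch.tensor([[1, 1, 1, 0]])                        # last position is padding
bias = (1.0 - attention_mask.to(torch.float32)) * torch.finfo(torch.float32).min
extended = bias.unsqueeze(1).unsqueeze(1)                            # (batch, 1, 1, seq)

scores = torch.zeros(1, 1, 4, 4)                                     # dummy attention scores
probs = torch.softmax(scores + extended, dim=-1)
print(probs[0, 0, 0])  # tensor([0.3333, 0.3333, 0.3333, 0.0000]) -- padding masked out
```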
600
+ @add_start_docstrings(
601
+ """ErnieM Model transformer with a sequence classification/regression head on top (a linear layer on top of
602
+ the pooled output) e.g. for GLUE tasks.""",
603
+ ERNIE_M_START_DOCSTRING,
604
+ )
605
+ class ErnieMForSequenceClassification(ErnieMPreTrainedModel):
606
+ # Copied from transformers.models.bert.modeling_bert.BertForSequenceClassification.__init__ with Bert->ErnieM,bert->ernie_m
607
+ def __init__(self, config):
608
+ super().__init__(config)
609
+ self.num_labels = config.num_labels
610
+ self.config = config
611
+
612
+ self.ernie_m = ErnieMModel(config)
613
+ classifier_dropout = (
614
+ config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
615
+ )
616
+ self.dropout = nn.Dropout(classifier_dropout)
617
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
618
+
619
+ # Initialize weights and apply final processing
620
+ self.post_init()
621
+
622
+ @add_start_docstrings_to_model_forward(ERNIE_M_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
623
+ @add_code_sample_docstrings(
624
+ processor_class=_TOKENIZER_FOR_DOC,
625
+ checkpoint=_CHECKPOINT_FOR_DOC,
626
+ output_type=SequenceClassifierOutput,
627
+ config_class=_CONFIG_FOR_DOC,
628
+ )
629
+ def forward(
630
+ self,
631
+ input_ids: Optional[torch.Tensor] = None,
632
+ attention_mask: Optional[torch.Tensor] = None,
633
+ position_ids: Optional[torch.Tensor] = None,
634
+ head_mask: Optional[torch.Tensor] = None,
635
+ inputs_embeds: Optional[torch.Tensor] = None,
636
+ past_key_values: Optional[List[torch.Tensor]] = None,
637
+ use_cache: Optional[bool] = None,
638
+ output_hidden_states: Optional[bool] = None,
639
+ output_attentions: Optional[bool] = None,
640
+ return_dict: Optional[bool] = True,
641
+ labels: Optional[torch.Tensor] = None,
642
+ ) -> Union[Tuple[torch.FloatTensor], SequenceClassifierOutput]:
643
+ r"""
644
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
645
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
646
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
647
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
648
+ """
649
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
650
+
651
+ outputs = self.ernie_m(
652
+ input_ids,
653
+ attention_mask=attention_mask,
654
+ position_ids=position_ids,
655
+ head_mask=head_mask,
656
+ inputs_embeds=inputs_embeds,
657
+ past_key_values=past_key_values,
658
+ output_hidden_states=output_hidden_states,
659
+ output_attentions=output_attentions,
660
+ return_dict=return_dict,
661
+ )
662
+
663
+ pooled_output = outputs[1]
664
+
665
+ pooled_output = self.dropout(pooled_output)
666
+ logits = self.classifier(pooled_output)
667
+
668
+ loss = None
669
+ if labels is not None:
670
+ if self.config.problem_type is None:
671
+ if self.num_labels == 1:
672
+ self.config.problem_type = "regression"
673
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
674
+ self.config.problem_type = "single_label_classification"
675
+ else:
676
+ self.config.problem_type = "multi_label_classification"
677
+
678
+ if self.config.problem_type == "regression":
679
+ loss_fct = MSELoss()
680
+ if self.num_labels == 1:
681
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
682
+ else:
683
+ loss = loss_fct(logits, labels)
684
+ elif self.config.problem_type == "single_label_classification":
685
+ loss_fct = CrossEntropyLoss()
686
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
687
+ elif self.config.problem_type == "multi_label_classification":
688
+ loss_fct = BCEWithLogitsLoss()
689
+ loss = loss_fct(logits, labels)
690
+ if not return_dict:
691
+ output = (logits,) + outputs[2:]
692
+ return ((loss,) + output) if loss is not None else output
693
+
694
+ return SequenceClassifierOutput(
695
+ loss=loss,
696
+ logits=logits,
697
+ hidden_states=outputs.hidden_states,
698
+ attentions=outputs.attentions,
699
+ )
700
+
701
+
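The loss dispatch in the classification head above follows the usual transformers convention when `config.problem_type` is unset; roughly, as a sketch:

```python
import torch

num_labels = 3
labels = torch.tensor([0, 2])                        # integer class ids

if num_labels == 1:
    problem_type = "regression"                      # MSELoss on squeezed logits
elif labels.dtype in (torch.long, torch.int):
    problem_type = "single_label_classification"     # CrossEntropyLoss
else:
    problem_type = "multi_label_classification"      # BCEWithLogitsLoss on float multi-hot labels

print(problem_type)  # single_label_classification
```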
702
+ @add_start_docstrings(
703
+ """ErnieM Model with a multiple choice classification head on top (a linear layer on top of
704
+ the pooled output and a softmax) e.g. for RocStories/SWAG tasks.""",
705
+ ERNIE_M_START_DOCSTRING,
706
+ )
707
+ class ErnieMForMultipleChoice(ErnieMPreTrainedModel):
708
+ # Copied from transformers.models.bert.modeling_bert.BertForMultipleChoice.__init__ with Bert->ErnieM,bert->ernie_m
709
+ def __init__(self, config):
710
+ super().__init__(config)
711
+
712
+ self.ernie_m = ErnieMModel(config)
713
+ classifier_dropout = (
714
+ config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
715
+ )
716
+ self.dropout = nn.Dropout(classifier_dropout)
717
+ self.classifier = nn.Linear(config.hidden_size, 1)
718
+
719
+ # Initialize weights and apply final processing
720
+ self.post_init()
721
+
722
+ @add_start_docstrings_to_model_forward(ERNIE_M_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
723
+ @add_code_sample_docstrings(
724
+ checkpoint=_CHECKPOINT_FOR_DOC,
725
+ output_type=MultipleChoiceModelOutput,
726
+ config_class=_CONFIG_FOR_DOC,
727
+ )
728
+ def forward(
729
+ self,
730
+ input_ids: Optional[torch.Tensor] = None,
731
+ attention_mask: Optional[torch.Tensor] = None,
732
+ position_ids: Optional[torch.Tensor] = None,
733
+ head_mask: Optional[torch.Tensor] = None,
734
+ inputs_embeds: Optional[torch.Tensor] = None,
735
+ labels: Optional[torch.Tensor] = None,
736
+ output_attentions: Optional[bool] = None,
737
+ output_hidden_states: Optional[bool] = None,
738
+ return_dict: Optional[bool] = True,
739
+ ) -> Union[Tuple[torch.FloatTensor], MultipleChoiceModelOutput]:
740
+ r"""
741
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
742
+ Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
743
+ num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
744
+ `input_ids` above)
745
+ """
746
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
747
+ num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
748
+
749
+ input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
750
+ attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
751
+ position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
752
+ inputs_embeds = (
753
+ inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
754
+ if inputs_embeds is not None
755
+ else None
756
+ )
757
+
758
+ outputs = self.ernie_m(
759
+ input_ids,
760
+ attention_mask=attention_mask,
761
+ position_ids=position_ids,
762
+ head_mask=head_mask,
763
+ inputs_embeds=inputs_embeds,
764
+ output_attentions=output_attentions,
765
+ output_hidden_states=output_hidden_states,
766
+ return_dict=return_dict,
767
+ )
768
+
769
+ pooled_output = outputs[1]
770
+
771
+ pooled_output = self.dropout(pooled_output)
772
+ logits = self.classifier(pooled_output)
773
+ reshaped_logits = logits.view(-1, num_choices)
774
+
775
+ loss = None
776
+ if labels is not None:
777
+ loss_fct = CrossEntropyLoss()
778
+ loss = loss_fct(reshaped_logits, labels)
779
+
780
+ if not return_dict:
781
+ output = (reshaped_logits,) + outputs[2:]
782
+ return ((loss,) + output) if loss is not None else output
783
+
784
+ return MultipleChoiceModelOutput(
785
+ loss=loss,
786
+ logits=reshaped_logits,
787
+ hidden_states=outputs.hidden_states,
788
+ attentions=outputs.attentions,
789
+ )
790
+
791
+
792
+ @add_start_docstrings(
793
+ """ErnieM Model with a token classification head on top (a linear layer on top of
794
+ the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.""",
795
+ ERNIE_M_START_DOCSTRING,
796
+ )
797
+ class ErnieMForTokenClassification(ErnieMPreTrainedModel):
798
+ # Copied from transformers.models.bert.modeling_bert.BertForTokenClassification.__init__ with Bert->ErnieM,bert->ernie_m
799
+ def __init__(self, config):
800
+ super().__init__(config)
801
+ self.num_labels = config.num_labels
802
+
803
+ self.ernie_m = ErnieMModel(config, add_pooling_layer=False)
804
+ classifier_dropout = (
805
+ config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
806
+ )
807
+ self.dropout = nn.Dropout(classifier_dropout)
808
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
809
+
810
+ # Initialize weights and apply final processing
811
+ self.post_init()
812
+
813
+ @add_start_docstrings_to_model_forward(ERNIE_M_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
814
+ @add_code_sample_docstrings(
815
+ processor_class=_TOKENIZER_FOR_DOC,
816
+ checkpoint=_CHECKPOINT_FOR_DOC,
817
+ output_type=TokenClassifierOutput,
818
+ config_class=_CONFIG_FOR_DOC,
819
+ )
820
+ def forward(
821
+ self,
822
+ input_ids: Optional[torch.Tensor] = None,
823
+ attention_mask: Optional[torch.Tensor] = None,
824
+ position_ids: Optional[torch.Tensor] = None,
825
+ head_mask: Optional[torch.Tensor] = None,
826
+ inputs_embeds: Optional[torch.Tensor] = None,
827
+ past_key_values: Optional[List[torch.Tensor]] = None,
828
+ output_hidden_states: Optional[bool] = None,
829
+ output_attentions: Optional[bool] = None,
830
+ return_dict: Optional[bool] = True,
831
+ labels: Optional[torch.Tensor] = None,
832
+ ) -> Union[Tuple[torch.FloatTensor], TokenClassifierOutput]:
833
+ r"""
834
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
835
+ Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
836
+ """
837
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
838
+
839
+ outputs = self.ernie_m(
840
+ input_ids,
841
+ attention_mask=attention_mask,
842
+ position_ids=position_ids,
843
+ head_mask=head_mask,
844
+ inputs_embeds=inputs_embeds,
845
+ past_key_values=past_key_values,
846
+ output_attentions=output_attentions,
847
+ output_hidden_states=output_hidden_states,
848
+ return_dict=return_dict,
849
+ )
850
+
851
+ sequence_output = outputs[0]
852
+
853
+ sequence_output = self.dropout(sequence_output)
854
+ logits = self.classifier(sequence_output)
855
+
856
+ loss = None
857
+ if labels is not None:
858
+ loss_fct = CrossEntropyLoss()
859
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
860
+
861
+ if not return_dict:
862
+ output = (logits,) + outputs[2:]
863
+ return ((loss,) + output) if loss is not None else output
864
+
865
+ return TokenClassifierOutput(
866
+ loss=loss,
867
+ logits=logits,
868
+ hidden_states=outputs.hidden_states,
869
+ attentions=outputs.attentions,
870
+ )
871
+
872
+
873
+ @add_start_docstrings(
874
+ """ErnieM Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
875
+ layers on top of the hidden-states output to compute `span start logits` and `span end logits`).""",
876
+ ERNIE_M_START_DOCSTRING,
877
+ )
878
+ class ErnieMForQuestionAnswering(ErnieMPreTrainedModel):
879
+ # Copied from transformers.models.bert.modeling_bert.BertForQuestionAnswering.__init__ with Bert->ErnieM,bert->ernie_m
880
+ def __init__(self, config):
881
+ super().__init__(config)
882
+ self.num_labels = config.num_labels
883
+
884
+ self.ernie_m = ErnieMModel(config, add_pooling_layer=False)
885
+ self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
886
+
887
+ # Initialize weights and apply final processing
888
+ self.post_init()
889
+
890
+ @add_start_docstrings_to_model_forward(ERNIE_M_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
891
+ @add_code_sample_docstrings(
892
+ processor_class=_TOKENIZER_FOR_DOC,
893
+ checkpoint=_CHECKPOINT_FOR_DOC,
894
+ output_type=QuestionAnsweringModelOutput,
895
+ config_class=_CONFIG_FOR_DOC,
896
+ )
897
+ def forward(
898
+ self,
899
+ input_ids: Optional[torch.Tensor] = None,
900
+ attention_mask: Optional[torch.Tensor] = None,
901
+ position_ids: Optional[torch.Tensor] = None,
902
+ head_mask: Optional[torch.Tensor] = None,
903
+ inputs_embeds: Optional[torch.Tensor] = None,
904
+ start_positions: Optional[torch.Tensor] = None,
905
+ end_positions: Optional[torch.Tensor] = None,
906
+ output_attentions: Optional[bool] = None,
907
+ output_hidden_states: Optional[bool] = None,
908
+ return_dict: Optional[bool] = True,
909
+ ) -> Union[Tuple[torch.FloatTensor], QuestionAnsweringModelOutput]:
910
+ r"""
911
+ start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
912
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
913
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
914
+ are not taken into account for computing the loss.
915
+ end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
916
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
917
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
918
+ are not taken into account for computing the loss.
919
+ """
920
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
921
+
922
+ outputs = self.ernie_m(
923
+ input_ids,
924
+ attention_mask=attention_mask,
925
+ position_ids=position_ids,
926
+ head_mask=head_mask,
927
+ inputs_embeds=inputs_embeds,
928
+ output_attentions=output_attentions,
929
+ output_hidden_states=output_hidden_states,
930
+ return_dict=return_dict,
931
+ )
932
+
933
+ sequence_output = outputs[0]
934
+
935
+ logits = self.qa_outputs(sequence_output)
936
+ start_logits, end_logits = logits.split(1, dim=-1)
937
+ start_logits = start_logits.squeeze(-1).contiguous()
938
+ end_logits = end_logits.squeeze(-1).contiguous()
939
+
940
+ total_loss = None
941
+ if start_positions is not None and end_positions is not None:
942
+ # If we are on multi-GPU, splitting adds a dimension
943
+ if len(start_positions.size()) > 1:
944
+ start_positions = start_positions.squeeze(-1)
945
+ if len(end_positions.size()) > 1:
946
+ end_positions = end_positions.squeeze(-1)
947
+ # sometimes the start/end positions are outside our model inputs, we ignore these terms
948
+ ignored_index = start_logits.size(1)
949
+ start_positions = start_positions.clamp(0, ignored_index)
950
+ end_positions = end_positions.clamp(0, ignored_index)
951
+
952
+ loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
953
+ start_loss = loss_fct(start_logits, start_positions)
954
+ end_loss = loss_fct(end_logits, end_positions)
955
+ total_loss = (start_loss + end_loss) / 2
956
+
957
+ if not return_dict:
958
+ output = (start_logits, end_logits) + outputs[2:]
959
+ return ((total_loss,) + output) if total_loss is not None else output
960
+
961
+ return QuestionAnsweringModelOutput(
962
+ loss=total_loss,
963
+ start_logits=start_logits,
964
+ end_logits=end_logits,
965
+ hidden_states=outputs.hidden_states,
966
+ attentions=outputs.attentions,
967
+ )
968
+
969
+
970
+ @add_start_docstrings(
971
+ """ErnieMForInformationExtraction is a Ernie-M Model with two linear layer on top of the hidden-states output to
972
+ compute `start_prob` and `end_prob`, designed for Universal Information Extraction.""",
973
+ ERNIE_M_START_DOCSTRING,
974
+ )
975
+ # Copied from paddlenlp.transformers.ernie_m.modeling.UIEM
976
+ class ErnieMForInformationExtraction(ErnieMPreTrainedModel):
977
+ def __init__(self, config):
978
+ super().__init__(config)
979
+ self.ernie_m = ErnieMModel(config)
980
+ self.linear_start = nn.Linear(config.hidden_size, 1)
981
+ self.linear_end = nn.Linear(config.hidden_size, 1)
982
+ self.sigmoid = nn.Sigmoid()
983
+ self.post_init()
984
+
985
+ @add_start_docstrings_to_model_forward(ERNIE_M_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
986
+ def forward(
987
+ self,
988
+ input_ids: Optional[torch.Tensor] = None,
989
+ attention_mask: Optional[torch.Tensor] = None,
990
+ position_ids: Optional[torch.Tensor] = None,
991
+ head_mask: Optional[torch.Tensor] = None,
992
+ inputs_embeds: Optional[torch.Tensor] = None,
993
+ start_positions: Optional[torch.Tensor] = None,
994
+ end_positions: Optional[torch.Tensor] = None,
995
+ output_attentions: Optional[bool] = None,
996
+ output_hidden_states: Optional[bool] = None,
997
+ return_dict: Optional[bool] = True,
998
+ ) -> Union[Tuple[torch.FloatTensor], QuestionAnsweringModelOutput]:
999
+ r"""
1000
+ start_positions (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1001
+ Labels for position (index) for computing the start_positions loss. Positions outside of the sequence are
1002
+ not taken into account for computing the loss.
1003
+ end_positions (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1004
+ Labels for position (index) for computing the end_positions loss. Positions outside of the sequence are not
1005
+ taken into account for computing the loss.
1006
+ """
1007
+
1008
+ result = self.ernie_m(
1009
+ input_ids,
1010
+ attention_mask=attention_mask,
1011
+ position_ids=position_ids,
1012
+ head_mask=head_mask,
1013
+ inputs_embeds=inputs_embeds,
1014
+ output_attentions=output_attentions,
1015
+ output_hidden_states=output_hidden_states,
1016
+ return_dict=return_dict,
1017
+ )
1018
+ if return_dict:
1019
+ sequence_output = result.last_hidden_state
1020
+ else:
1021
+ sequence_output = result[0]
1022
+
1023
+ start_logits = self.linear_start(sequence_output)
1024
+ start_logits = start_logits.squeeze(-1)
1025
+ end_logits = self.linear_end(sequence_output)
1026
+ end_logits = end_logits.squeeze(-1)
1027
+
1028
+ total_loss = None
1029
+ if start_positions is not None and end_positions is not None:
1030
+ # If we are on multi-GPU, splitting adds a dimension
1031
+ if len(start_positions.size()) > 1:
1032
+ start_positions = start_positions.squeeze(-1)
1033
+ if len(end_positions.size()) > 1:
1034
+ end_positions = end_positions.squeeze(-1)
1035
+ # sometimes the start/end positions are outside our model inputs, we ignore these terms
1036
+ ignored_index = start_logits.size(1)
1037
+ start_positions = start_positions.clamp(0, ignored_index)
1038
+ end_positions = end_positions.clamp(0, ignored_index)
1039
+
1040
+ loss_fct = BCEWithLogitsLoss()
1041
+ start_loss = loss_fct(start_logits, start_positions)
1042
+ end_loss = loss_fct(end_logits, end_positions)
1043
+ total_loss = (start_loss + end_loss) / 2
1044
+
1045
+ if not return_dict:
1046
+ return tuple(
1047
+ i
1048
+ for i in [total_loss, start_logits, end_logits, result.hidden_states, result.attentions]
1049
+ if i is not None
1050
+ )
1051
+
1052
+ return QuestionAnsweringModelOutput(
1053
+ loss=total_loss,
1054
+ start_logits=start_logits,
1055
+ end_logits=end_logits,
1056
+ hidden_states=result.hidden_states,
1057
+ attentions=result.attentions,
1058
+ )
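An end-to-end sketch of the classes added above (assumes network access to the `susnato/ernie-m-base_pytorch` checkpoint referenced by `_CHECKPOINT_FOR_DOC`, plus the `sentencepiece` dependency required by the tokenizer):

```python
import torch
from transformers import ErnieMModel, ErnieMTokenizer

tokenizer = ErnieMTokenizer.from_pretrained("susnato/ernie-m-base_pytorch")
model = ErnieMModel.from_pretrained("susnato/ernie-m-base_pytorch")

inputs = tokenizer("ErnieM is a multilingual encoder.", return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

print(outputs.last_hidden_state.shape)  # torch.Size([1, seq_len, 768])
print(outputs.pooler_output.shape)      # torch.Size([1, 768])
```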
venv/lib/python3.10/site-packages/transformers/models/ernie_m/tokenization_ernie_m.py ADDED
@@ -0,0 +1,405 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 Xuan Ouyang, Shuohuan Wang, Chao Pang, Yu Sun, Hao Tian, Hua Wu, Haifeng Wang and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Tokenization classes for Ernie-M."""
16
+
17
+ import io
18
+ import os
19
+ import unicodedata
20
+ from typing import Any, Dict, List, Optional, Tuple
21
+
22
+ import sentencepiece as spm
23
+
24
+ from ...tokenization_utils import PreTrainedTokenizer
25
+ from ...utils import logging
26
+
27
+
28
+ logger = logging.get_logger(__name__)
29
+
30
+ SPIECE_UNDERLINE = "▁"
31
+
32
+ VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "sentencepiece_model_ckpt": "sentencepiece.bpe.model"}
33
+
34
+ RESOURCE_FILES_NAMES = {
35
+ "sentencepiece_model_file": "sentencepiece.bpe.model",
36
+ "vocab_file": "vocab.txt",
37
+ }
38
+
39
+
40
+ # Adapted from paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer
41
+ class ErnieMTokenizer(PreTrainedTokenizer):
42
+ r"""
43
+ Constructs an Ernie-M tokenizer. It uses the `sentencepiece` library to split words into sub-words.
44
+
45
+ Args:
46
+ sentencepiece_model_file (`str`):
47
+ The file path of sentencepiece model.
48
+ vocab_file (`str`, *optional*):
49
+ The file path of the vocabulary.
50
+ do_lower_case (`bool`, *optional*, defaults to `False`):
51
+ Whether or not to lowercase the input when tokenizing.
52
+ unk_token (`str`, *optional*, defaults to `"[UNK]"`):
53
+ A special token representing the `unknown (out-of-vocabulary)` token. An unknown token is set to be
54
+ `unk_token` in order to be converted to an ID.
55
+ sep_token (`str`, *optional*, defaults to `"[SEP]"`):
56
+ A special token separating two different sentences in the same input.
57
+ pad_token (`str`, *optional*, defaults to `"[PAD]"`):
58
+ A special token used to make arrays of tokens the same size for batching purposes.
59
+ cls_token (`str`, *optional*, defaults to `"[CLS]"`):
60
+ A special token used for sequence classification. It is the last token of the sequence when built with
61
+ special tokens.
62
+ mask_token (`str`, *optional*, defaults to `"[MASK]"`):
63
+ A special token representing a masked token. This is the token used in the masked language modeling task
64
+ which the model tries to predict the original unmasked ones.
65
+ """
+
+     # Ernie-M model doesn't have token_type embedding.
+     model_input_names: List[str] = ["input_ids"]
+
+     vocab_files_names = VOCAB_FILES_NAMES
+     resource_files_names = RESOURCE_FILES_NAMES
+
+     def __init__(
+         self,
+         sentencepiece_model_ckpt,
+         vocab_file=None,
+         do_lower_case=False,
+         encoding="utf8",
+         unk_token="[UNK]",
+         sep_token="[SEP]",
+         pad_token="[PAD]",
+         cls_token="[CLS]",
+         mask_token="[MASK]",
+         sp_model_kwargs: Optional[Dict[str, Any]] = None,
+         **kwargs,
+     ) -> None:
+         # The mask token behaves like a normal word, i.e. it includes the space before it and
+         # is included in the raw text, so there should be a match in a non-normalized sentence.
+
+         self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
+
+         self.do_lower_case = do_lower_case
+         self.sentencepiece_model_ckpt = sentencepiece_model_ckpt
+         self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
+         self.sp_model.Load(sentencepiece_model_ckpt)
+
+         # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
+         if vocab_file is not None:
+             self.vocab = self.load_vocab(filepath=vocab_file)
+         else:
+             self.vocab = {self.sp_model.id_to_piece(id): id for id in range(self.sp_model.get_piece_size())}
+         self.reverse_vocab = {v: k for k, v in self.vocab.items()}
+
+         super().__init__(
+             do_lower_case=do_lower_case,
+             unk_token=unk_token,
+             sep_token=sep_token,
+             pad_token=pad_token,
+             cls_token=cls_token,
+             mask_token=mask_token,
+             vocab_file=vocab_file,
+             encoding=encoding,
+             sp_model_kwargs=self.sp_model_kwargs,
+             **kwargs,
+         )
+
+     def get_offset_mapping(self, text):
+         if text is None:
+             return None
+
+         split_tokens = self.tokenize(text)
+         normalized_text, char_mapping = "", []
+
+         for i, ch in enumerate(text):
+             if ch in self.SP_CHAR_MAPPING:
+                 ch = self.SP_CHAR_MAPPING.get(ch)
+             else:
+                 ch = unicodedata.normalize("NFKC", ch)
+             if self.is_whitespace(ch):
+                 continue
+             normalized_text += ch
+             char_mapping.extend([i] * len(ch))
+
+         text, token_mapping, offset = normalized_text, [], 0
+
+         if self.do_lower_case:
+             text = text.lower()
+
+         for token in split_tokens:
+             if token[:1] == "▁":
+                 token = token[1:]
+             start = text[offset:].index(token) + offset
+             end = start + len(token)
+
+             token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1))
+             offset = end
+         return token_mapping
+
+     @property
+     def vocab_size(self):
+         return len(self.vocab)
+
+     def get_vocab(self):
+         return dict(self.vocab, **self.added_tokens_encoder)
+
+     def __getstate__(self):
+         state = self.__dict__.copy()
+         state["sp_model"] = None
+         return state
+
+     def __setstate__(self, d):
+         self.__dict__ = d
+
+         # for backward compatibility
+         if not hasattr(self, "sp_model_kwargs"):
+             self.sp_model_kwargs = {}
+
+         self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
+         self.sp_model.Load(self.sentencepiece_model_ckpt)
+
+     def clean_text(self, text):
+         """Performs invalid character removal and whitespace cleanup on text."""
+         return "".join((self.SP_CHAR_MAPPING.get(c, c) for c in text))
+
+     def _tokenize(self, text, enable_sampling=False, nbest_size=64, alpha=0.1):
+         """Tokenize a string."""
+
+         if self.sp_model_kwargs.get("enable_sampling") is True:
+             enable_sampling = True
+         if self.sp_model_kwargs.get("alpha") is not None:
+             alpha = self.sp_model_kwargs.get("alpha")
+         if self.sp_model_kwargs.get("nbest_size") is not None:
+             nbest_size = self.sp_model_kwargs.get("nbest_size")
+
+         if not enable_sampling:
+             pieces = self.sp_model.EncodeAsPieces(text)
+         else:
+             pieces = self.sp_model.SampleEncodeAsPieces(text, nbest_size, alpha)
+         new_pieces = []
+         for pi, piece in enumerate(pieces):
+             if piece == SPIECE_UNDERLINE:
+                 if not pieces[pi + 1].startswith(SPIECE_UNDERLINE) and pi != 0:
+                     new_pieces.append(SPIECE_UNDERLINE)
+                     continue
+                 else:
+                     continue
+             lst_i = 0
+             for i, chunk in enumerate(piece):
+                 if chunk == SPIECE_UNDERLINE:
+                     continue
+                 if self.is_ch_char(chunk) or self.is_punct(chunk):
+                     if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
+                         new_pieces.append(piece[lst_i:i])
+                     new_pieces.append(chunk)
+                     lst_i = i + 1
+                 elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
+                     if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
+                         new_pieces.append(piece[lst_i:i])
+                     lst_i = i
+                 elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
+                     if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
+                         new_pieces.append(piece[lst_i:i])
+                     lst_i = i
+             if len(piece) > lst_i:
+                 new_pieces.append(piece[lst_i:])
+         return new_pieces
+
+     def convert_tokens_to_string(self, tokens):
+         """Converts a sequence of tokens (strings for sub-words) into a single string."""
+         out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
+         return out_string
+
+     def convert_ids_to_string(self, ids):
+         """
+         Converts a sequence of ids into a single string.
+         """
+         tokens = self.convert_ids_to_tokens(ids)
+         out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
+         return out_string
+
+     # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
+     def _convert_token_to_id(self, token):
+         return self.vocab.get(token, self.vocab.get(self.unk_token))
+
+     # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
+     def _convert_id_to_token(self, index):
+         """Converts an index (integer) to a token (str) using the vocab."""
+         return self.reverse_vocab.get(index, self.unk_token)
+
+     def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
+         r"""
+         Build model inputs from a sequence or a pair of sequences for sequence classification tasks by
+         concatenating and adding special tokens. An ErnieM sequence has the following format:
+
+         - single sequence: `[CLS] X [SEP]`
+         - pair of sequences: `[CLS] A [SEP] [SEP] B [SEP]`
+
+         Args:
+             token_ids_0 (`List[int]`):
+                 List of IDs to which the special tokens will be added.
+             token_ids_1 (`List[int]`, *optional*):
+                 Optional second list of IDs for sequence pairs.
+         Returns:
+             `List[int]`: List of input IDs with the appropriate special tokens.
+         """
+         if token_ids_1 is None:
+             return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
+         _cls = [self.cls_token_id]
+         _sep = [self.sep_token_id]
+         return _cls + token_ids_0 + _sep + _sep + token_ids_1 + _sep
+
+     def build_offset_mapping_with_special_tokens(self, offset_mapping_0, offset_mapping_1=None):
+         r"""
+         Build an offset map from a pair of offset maps by concatenating and adding the offsets of special tokens.
+         An Ernie-M offset_mapping has the following format:
+
+         - single sequence: `(0,0) X (0,0)`
+         - pair of sequences: `(0,0) A (0,0) (0,0) B (0,0)`
+
+         Args:
+             offset_mapping_0 (`List[tuple]`):
+                 List of char offsets to which the special tokens will be added.
+             offset_mapping_1 (`List[tuple]`, *optional*):
+                 Optional second list of wordpiece offsets for offset mapping pairs.
+         Returns:
+             `List[tuple]`: List of wordpiece offsets with the appropriate offsets of special tokens.
+         """
+         if offset_mapping_1 is None:
+             return [(0, 0)] + offset_mapping_0 + [(0, 0)]
+
+         return [(0, 0)] + offset_mapping_0 + [(0, 0), (0, 0)] + offset_mapping_1 + [(0, 0)]
+
+     def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
+         r"""
+         Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding
+         special tokens using the tokenizer `encode` method.
+
+         Args:
+             token_ids_0 (`List[int]`):
+                 List of ids of the first sequence.
+             token_ids_1 (`List[int]`, *optional*):
+                 Optional second list of IDs for sequence pairs.
+             already_has_special_tokens (`bool`, *optional*, defaults to `False`):
+                 Whether or not the token list is already formatted with special tokens for the model.
+         Returns:
+             `List[int]`:
+                 The list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
+         """
+
+         if already_has_special_tokens:
+             if token_ids_1 is not None:
+                 raise ValueError(
+                     "You should not supply a second sequence if the provided sequence of "
+                     "ids is already formatted with special tokens for the model."
+                 )
+             return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
+
+         if token_ids_1 is not None:
+             return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
+         return [1] + ([0] * len(token_ids_0)) + [1]
+
+     def create_token_type_ids_from_sequences(
+         self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+     ) -> List[int]:
+         """
+         Create the token type IDs corresponding to the sequences passed. [What are token type
+         IDs?](../glossary#token-type-ids) Should be overridden in a subclass if the model has a special way of
+         building those.
+
+         Args:
+             token_ids_0 (`List[int]`):
+                 The first tokenized sequence.
+             token_ids_1 (`List[int]`, *optional*):
+                 The second tokenized sequence.
+         Returns:
+             `List[int]`: The token type ids.
+         """
+         # called when `add_special_tokens` is True, so align with `build_inputs_with_special_tokens` method
+         if token_ids_1 is None:
+             # [CLS] X [SEP]
+             return (len(token_ids_0) + 2) * [0]
+
+         # [CLS] A [SEP] [SEP] B [SEP]
+         return [0] * (len(token_ids_0) + 1) + [1] * (len(token_ids_1) + 3)
+
+     def is_ch_char(self, char):
+         """
+         Checks whether `char` is a CJK (Chinese) character.
+         """
+         if "\u4e00" <= char <= "\u9fff":
+             return True
+         return False
+
+     def is_alpha(self, char):
+         """
+         Checks whether `char` is an ASCII alphabetic character.
+         """
+         if ("a" <= char <= "z") or ("A" <= char <= "Z"):
+             return True
+         return False
+
+     def is_punct(self, char):
+         """
+         Checks whether `char` is a punctuation character.
+         """
+         if char in ",;:.?!~,;:。?!《》【】":
+             return True
+         return False
+
+     def is_whitespace(self, char):
+         """
+         Checks whether `char` is a whitespace character.
+         """
+         if char == " " or char == "\t" or char == "\n" or char == "\r":
+             return True
+         if len(char) == 1:
+             cat = unicodedata.category(char)
+             if cat == "Zs":
+                 return True
+         return False
+
+     def load_vocab(self, filepath):
+         token_to_idx = {}
+         with io.open(filepath, "r", encoding="utf-8") as f:
+             for index, line in enumerate(f):
+                 token = line.rstrip("\n")
+                 token_to_idx[token] = int(index)
+
+         return token_to_idx
+
+     def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
+         index = 0
+         if os.path.isdir(save_directory):
+             vocab_file = os.path.join(
+                 save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
+             )
+         else:
+             vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
+         with open(vocab_file, "w", encoding="utf-8") as writer:
+             for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
+                 if index != token_index:
+                     logger.warning(
+                         f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
+                         " Please check that the vocabulary is not corrupted!"
+                     )
+                     index = token_index
+                 writer.write(token + "\n")
+                 index += 1
+
+         tokenizer_model_file = os.path.join(save_directory, "sentencepiece.bpe.model")
+         with open(tokenizer_model_file, "wb") as fi:
+             content_spiece_model = self.sp_model.serialized_model_proto()
+             fi.write(content_spiece_model)
+
+         return (vocab_file,)
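
The three special-token helpers above describe the same `[CLS] A [SEP] [SEP] B [SEP]` pair layout, so their outputs line up index by index. A minimal sketch of that correspondence (illustration only, not part of this commit; the model path is a placeholder, and the constructor fails without a real SentencePiece model):

    # Placeholder path; ErnieMTokenizer cannot be built without a SentencePiece model file.
    from transformers.models.ernie_m.tokenization_ernie_m import ErnieMTokenizer

    tok = ErnieMTokenizer(sentencepiece_model_ckpt="sentencepiece.bpe.model")

    # For body tokens [5, 6, 7] and [8, 9], all three views have 3 + 2 + 4 = 9 positions:
    ids = tok.build_inputs_with_special_tokens([5, 6, 7], [8, 9])
    mask = tok.get_special_tokens_mask([5, 6, 7], [8, 9])                # [1, 0, 0, 0, 1, 1, 0, 0, 1]
    types = tok.create_token_type_ids_from_sequences([5, 6, 7], [8, 9])  # [0, 0, 0, 0, 1, 1, 1, 1, 1]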
venv/lib/python3.10/site-packages/transformers/models/musicgen/__init__.py ADDED
@@ -0,0 +1,67 @@
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ from typing import TYPE_CHECKING
+
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
+
+
+ _import_structure = {
+     "configuration_musicgen": [
+         "MUSICGEN_PRETRAINED_CONFIG_ARCHIVE_MAP",
+         "MusicgenConfig",
+         "MusicgenDecoderConfig",
+     ],
+     "processing_musicgen": ["MusicgenProcessor"],
+ }
+
+ try:
+     if not is_torch_available():
+         raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+     pass
+ else:
+     _import_structure["modeling_musicgen"] = [
+         "MUSICGEN_PRETRAINED_MODEL_ARCHIVE_LIST",
+         "MusicgenForConditionalGeneration",
+         "MusicgenForCausalLM",
+         "MusicgenModel",
+         "MusicgenPreTrainedModel",
+     ]
+
+ if TYPE_CHECKING:
+     from .configuration_musicgen import (
+         MUSICGEN_PRETRAINED_CONFIG_ARCHIVE_MAP,
+         MusicgenConfig,
+         MusicgenDecoderConfig,
+     )
+     from .processing_musicgen import MusicgenProcessor
+
+     try:
+         if not is_torch_available():
+             raise OptionalDependencyNotAvailable()
+     except OptionalDependencyNotAvailable:
+         pass
+     else:
+         from .modeling_musicgen import (
+             MUSICGEN_PRETRAINED_MODEL_ARCHIVE_LIST,
+             MusicgenForCausalLM,
+             MusicgenForConditionalGeneration,
+             MusicgenModel,
+             MusicgenPreTrainedModel,
+         )
+
+ else:
+     import sys
+
+     sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
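
This `__init__.py` follows the transformers lazy-import pattern: under `TYPE_CHECKING` the real imports run so static analyzers see the symbols, while at runtime the module object is swapped for a `_LazyModule` that resolves each name in `_import_structure` on first attribute access. A rough illustration of the effect (not part of this commit):

    # Importing the package is cheap; torch-backed modeling code is not loaded yet.
    import transformers.models.musicgen as musicgen

    cfg_cls = musicgen.MusicgenConfig                      # loads configuration_musicgen on demand
    model_cls = musicgen.MusicgenForConditionalGeneration  # loads modeling_musicgen (requires torch)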
venv/lib/python3.10/site-packages/transformers/models/musicgen/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.1 kB)
venv/lib/python3.10/site-packages/transformers/models/musicgen/__pycache__/configuration_musicgen.cpython-310.pyc ADDED
Binary file (9.87 kB)
venv/lib/python3.10/site-packages/transformers/models/musicgen/__pycache__/convert_musicgen_transformers.cpython-310.pyc ADDED
Binary file (6.25 kB)
venv/lib/python3.10/site-packages/transformers/models/musicgen/__pycache__/modeling_musicgen.cpython-310.pyc ADDED
Binary file (80.9 kB)
venv/lib/python3.10/site-packages/transformers/models/musicgen/__pycache__/processing_musicgen.cpython-310.pyc ADDED
Binary file (4.5 kB)