applied-ai-018 committed
Commit d487907 · verified · 1 Parent(s): dd14b2a

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50):
  1. lm-evaluation-harness/tests/models/test_gguf.py +152 -0
  2. lm-evaluation-harness/tests/models/test_huggingface.py +143 -0
  3. lm-evaluation-harness/tests/models/test_neuralmagic.py +61 -0
  4. lm-evaluation-harness/tests/models/test_neuron_optimum.py +26 -0
  5. lm-evaluation-harness/tests/models/test_openvino.py +73 -0
  6. lm-evaluation-harness/tests/models/test_vllm.py +51 -0
  7. lm-evaluation-harness/tests/testdata/blimp_distractor_agreement_relational_noun-v0-loglikelihood +1 -0
  8. lm-evaluation-harness/tests/testdata/blimp_existential_there_subject_raising-v0-loglikelihood +1 -0
  9. lm-evaluation-harness/tests/testdata/blimp_expletive_it_object_raising-v0-res.json +1 -0
  10. lm-evaluation-harness/tests/testdata/crows_pairs_english_race_color-v0-res.json +1 -0
  11. lm-evaluation-harness/tests/testdata/hellaswag-v0-res.json +1 -0
  12. lm-evaluation-harness/tests/testdata/hendrycksTest-econometrics-v0-res.json +1 -0
  13. lm-evaluation-harness/tests/testdata/hendrycksTest-us_foreign_policy-v0-res.json +1 -0
  14. lm-evaluation-harness/tests/testdata/multirc-v1-res.json +1 -0
  15. lm-evaluation-harness/tests/testdata/pile_arxiv-v0-loglikelihood_rolling +1 -0
  16. lm-evaluation-harness/tests/testdata/pile_philpapers-v1-res.json +1 -0
  17. lm-evaluation-harness/tests/testdata/pile_pubmed-central-v0-loglikelihood_rolling +1 -0
  18. lm-evaluation-harness/tests/testdata/pile_ubuntu-irc-v1-loglikelihood_rolling +1 -0
  19. lm-evaluation-harness/wandb/debug-cli.root.log +0 -0
  20. lm-evaluation-harness/wandb/debug-internal.log +0 -0
  21. lm-evaluation-harness/wandb/debug.log +36 -0
  22. lm-evaluation-harness/wandb/run-20240523_130326-1oo0voi6/files/config.yaml +43 -0
  23. lm-evaluation-harness/wandb/run-20240523_130326-1oo0voi6/files/output.log +34 -0
  24. lm-evaluation-harness/wandb/run-20240523_130326-1oo0voi6/files/requirements.txt +155 -0
  25. lm-evaluation-harness/wandb/run-20240523_130326-1oo0voi6/files/wandb-metadata.json +850 -0
  26. lm-evaluation-harness/wandb/run-20240523_130326-1oo0voi6/files/wandb-summary.json +1 -0
  27. lm-evaluation-harness/wandb/run-20240523_130326-1oo0voi6/logs/debug-internal.log +183 -0
  28. lm-evaluation-harness/wandb/run-20240523_130326-1oo0voi6/logs/debug.log +29 -0
  29. lm-evaluation-harness/wandb/run-20240523_130326-1oo0voi6/run-1oo0voi6.wandb +0 -0
  30. lm-evaluation-harness/wandb/run-20240523_130448-vm5e7ag8/files/config.yaml +43 -0
  31. lm-evaluation-harness/wandb/run-20240523_130448-vm5e7ag8/files/output.log +34 -0
  32. lm-evaluation-harness/wandb/run-20240523_130448-vm5e7ag8/files/requirements.txt +155 -0
  33. lm-evaluation-harness/wandb/run-20240523_130448-vm5e7ag8/files/wandb-metadata.json +850 -0
  34. lm-evaluation-harness/wandb/run-20240523_130448-vm5e7ag8/files/wandb-summary.json +1 -0
  35. lm-evaluation-harness/wandb/run-20240523_130448-vm5e7ag8/logs/debug-internal.log +183 -0
  36. lm-evaluation-harness/wandb/run-20240523_130448-vm5e7ag8/logs/debug.log +29 -0
  37. lm-evaluation-harness/wandb/run-20240523_130448-vm5e7ag8/run-vm5e7ag8.wandb +0 -0
  38. lm-evaluation-harness/wandb/run-20240530_125856-v5b29ywz/run-v5b29ywz.wandb +0 -0
  39. venv/lib/python3.10/site-packages/transformers/models/convnextv2/__init__.py +97 -0
  40. venv/lib/python3.10/site-packages/transformers/models/convnextv2/configuration_convnextv2.py +117 -0
  41. venv/lib/python3.10/site-packages/transformers/models/convnextv2/convert_convnextv2_to_pytorch.py +286 -0
  42. venv/lib/python3.10/site-packages/transformers/models/convnextv2/modeling_convnextv2.py +574 -0
  43. venv/lib/python3.10/site-packages/transformers/models/convnextv2/modeling_tf_convnextv2.py +681 -0
  44. venv/lib/python3.10/site-packages/transformers/models/fastspeech2_conformer/tokenization_fastspeech2_conformer.py +184 -0
  45. venv/lib/python3.10/site-packages/transformers/models/mobilenet_v1/__pycache__/convert_original_tf_checkpoint_to_pytorch.cpython-310.pyc +0 -0
  46. venv/lib/python3.10/site-packages/transformers/models/mobilenet_v1/__pycache__/modeling_mobilenet_v1.cpython-310.pyc +0 -0
  47. venv/lib/python3.10/site-packages/transformers/models/prophetnet/__init__.py +65 -0
  48. venv/lib/python3.10/site-packages/transformers/models/prophetnet/__pycache__/__init__.cpython-310.pyc +0 -0
  49. venv/lib/python3.10/site-packages/transformers/models/prophetnet/__pycache__/configuration_prophetnet.cpython-310.pyc +0 -0
  50. venv/lib/python3.10/site-packages/transformers/models/prophetnet/__pycache__/convert_prophetnet_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc +0 -0
lm-evaluation-harness/tests/models/test_gguf.py ADDED
@@ -0,0 +1,152 @@
+ import hashlib
+ import json
+ import os
+ import pickle
+ import unittest
+ from unittest.mock import patch
+
+ from lm_eval.api.instance import Instance
+ from lm_eval.models.gguf import GGUFLM
+
+
+ base_url = "https://matthoffner-ggml-llm-api.hf.space"
+
+
+ def gguf_completion_mock(base_url=None, **kwargs):
+     # Generate a hash from the parameters
+     hash_kwargs = {"base_url": base_url, **kwargs}
+     hash = hashlib.sha256(
+         json.dumps(hash_kwargs, sort_keys=True).encode("utf-8")
+     ).hexdigest()
+
+     fname = f"./tests/testdata/gguf_test_{hash}.pkl"
+
+     if os.path.exists(fname):
+         with open(fname, "rb") as fh:
+             return pickle.load(fh)
+     else:
+         print("The file does not exist, attempting to write...")
+         if "stop" in kwargs:
+             result = {
+                 "choices": [
+                     {
+                         "text": f"generated text until {kwargs['stop']}",
+                         "logprobs": {"token_logprobs": [-1.2345], "text_offset": 0},
+                         "finish_reason": "length",
+                     }
+                 ]
+             }
+         else:
+             # generated with # curl -X 'POST' 'http://localhost:8000/v1/completions' -H 'accept: application/json' -H 'Content-Type: application/json' -d '{"prompt": "string", "logprobs": 10, "temperature": 0.0, "max_tokens": 1, "echo": true}'
+             result = {
+                 "id": "cmpl-4023976b-bc6a-43b0-a5a9-629f4216c7f3",
+                 "object": "text_completion",
+                 "created": 1700511361,
+                 "model": "../llama-2-7b.Q8_0.gguf",
+                 "choices": [
+                     {
+                         "text": "string(",
+                         "index": 0,
+                         "logprobs": {
+                             "text_offset": [0, 7],
+                             "token_logprobs": [None, -1.033263319857306],
+                             "tokens": [" string", "("],
+                             "top_logprobs": [
+                                 None,
+                                 {
+                                     "(": -1.033263319857306,
+                                     "[]": -2.6530743779017394,
+                                     ".": -3.0377145947291324,
+                                     "\n": -3.0399156750513976,
+                                     "_": -3.510376089937872,
+                                     " =": -3.6957918347193663,
+                                     ",": -3.9309459866358702,
+                                     " of": -4.2834550083949035,
+                                     '("': -4.322762841112799,
+                                     "()": -4.426229113466925,
+                                 },
+                             ],
+                         },
+                         "finish_reason": "length",
+                     }
+                 ],
+                 "usage": {
+                     "prompt_tokens": 2,
+                     "completion_tokens": 1,
+                     "total_tokens": 3,
+                 },
+             }
+
+         try:
+             os.makedirs(os.path.dirname(fname), exist_ok=True)
+             print("Writing file at", fname)
+             with open(fname, "wb") as fh:
+                 pickle.dump(result, fh)
+             print("File written successfully")
+         except Exception as e:
+             print("File writing failed:", e)
+
+     return result
+
+
+ class GGUFLMTest(unittest.TestCase):
+     @patch(
+         "lm_eval.models.gguf.GGUFLM.gguf_completion", side_effect=gguf_completion_mock
+     )
+     def test_loglikelihood(self, gguf_completion_mock):
+         lm = GGUFLM(base_url)
+
+         # Test loglikelihood
+         requests = [
+             Instance(
+                 request_type="loglikelihood",
+                 doc=args,
+                 arguments=args,
+                 idx=i,
+             )
+             for i, args in enumerate([("str", "ing"), ("str", "ing")])
+         ]
+         res = lm.loglikelihood(requests)
+
+         # Assert the loglikelihood response is correct
+         expected_res = [(logprob, True) for logprob in [0, 0]]
+         self.assertEqual(res, expected_res)
+
+     @patch(
+         "lm_eval.models.gguf.GGUFLM.gguf_completion", side_effect=gguf_completion_mock
+     )
+     def test_generate_until(self, gguf_completion_mock):
+         lm = GGUFLM(base_url)
+
+         # Test generate_until
+         requests = [
+             Instance(
+                 request_type="generate_until",
+                 doc={"input": doc},
+                 arguments=(doc, {"until": stop}),
+                 idx=i,
+             )
+             for i, (doc, stop) in enumerate([("input1", "stop1"), ("input2", "stop2")])
+         ]
+
+         res = lm.generate_until(requests)
+
+         # Assert the generate_until response is correct
+         expected_res = ["generated text until stop1", "generated text until stop2"]
+         self.assertEqual(res, expected_res)
+
+     # @patch('lm_eval.models.gguf.GGUFLM.gguf_completion', side_effect=gguf_completion_mock)
+     # def test_loglikelihood_rolling(self, gguf_completion_mock):
+     #     lm = GGUFLM(base_url)
+
+     #     # Test loglikelihood_rolling
+     #     requests = ["input1", "input2"]
+     #     res = lm.loglikelihood_rolling(requests)
+
+     #     # Assert the loglikelihood_rolling response is correct
+     #     expected_res = [(-1.2345, True), (-1.2345, True)]
+     #     self.assertEqual(res, expected_res)
+
+
+ if __name__ == "__main__":
+     unittest.main()
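
The mock above implements a record-and-replay pattern: the completion kwargs are hashed, and the canned API response is pickled under that hash, so reruns are deterministic and need no network. A minimal sketch of the same pattern factored as a reusable decorator (the `replay_cache` name and `fetch` parameter are illustrative, not part of the harness):

    import hashlib
    import json
    import os
    import pickle

    def replay_cache(cache_dir="./tests/testdata"):
        """Cache a callable's result on disk, keyed by a hash of its kwargs."""
        def decorator(fetch):
            def wrapper(**kwargs):
                key = hashlib.sha256(
                    json.dumps(kwargs, sort_keys=True).encode("utf-8")
                ).hexdigest()
                fname = os.path.join(cache_dir, f"replay_{key}.pkl")
                if os.path.exists(fname):  # replay the recorded response
                    with open(fname, "rb") as fh:
                        return pickle.load(fh)
                result = fetch(**kwargs)  # record once, e.g. against a live endpoint
                os.makedirs(cache_dir, exist_ok=True)
                with open(fname, "wb") as fh:
                    pickle.dump(result, fh)
                return result
            return wrapper
        return decorator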
lm-evaluation-harness/tests/models/test_huggingface.py ADDED
@@ -0,0 +1,143 @@
+ from __future__ import annotations
+
+ import sys
+ from pathlib import Path
+
+ import numpy as np
+ import torch
+
+ import lm_eval.tasks as tasks
+ from lm_eval.api.instance import Instance
+ from lm_eval.models.huggingface import HFLM
+
+
+ task_manager = tasks.TaskManager()
+
+
+ class Test_HFLM:
+     torch.use_deterministic_algorithms(True)
+     task_list = task_manager.load_task_or_group(["arc_easy", "gsm8k", "wikitext"])
+     version_minor = sys.version_info.minor
+     multiple_choice_task = task_list["arc_easy"]  # type: ignore
+     multiple_choice_task.build_all_requests(limit=10, rank=0, world_size=1)
+     MULTIPLE_CH: list[Instance] = multiple_choice_task.instances
+     generate_until_task = task_list["gsm8k"]  # type: ignore
+     generate_until_task._config.generation_kwargs["max_gen_toks"] = 10
+     generate_until_task.build_all_requests(limit=10, rank=0, world_size=1)
+     generate_until: list[Instance] = generate_until_task.instances
+     rolling_task = task_list["wikitext"]  # type: ignore
+     rolling_task.build_all_requests(limit=10, rank=0, world_size=1)
+     ROLLING: list[Instance] = rolling_task.instances
+
+     MULTIPLE_CH_RES = [
+         -41.902435302734375,
+         -42.939308166503906,
+         -33.914180755615234,
+         -37.07139205932617,
+         -22.95258331298828,
+         -20.342208862304688,
+         -14.818366050720215,
+         -27.942853927612305,
+         -15.80704116821289,
+         -15.936427116394043,
+         -13.052018165588379,
+         -18.04828453063965,
+         -13.345029830932617,
+         -13.366025924682617,
+         -12.127134323120117,
+         -11.872495651245117,
+         -47.10598373413086,
+         -47.76410675048828,
+         -36.4406852722168,
+         -50.0289421081543,
+         -16.72093963623047,
+         -18.535587310791016,
+         -26.46993637084961,
+         -20.355995178222656,
+         -17.757919311523438,
+         -21.80595588684082,
+         -33.1990852355957,
+         -39.28636932373047,
+         -14.759679794311523,
+         -16.753942489624023,
+         -11.486852645874023,
+         -15.42177677154541,
+         -13.15798282623291,
+         -15.887393951416016,
+         -15.28614616394043,
+         -12.339089393615723,
+         -44.59441375732422,
+         -55.40888214111328,
+         -52.70050811767578,
+         -56.25089645385742,
+     ]
+     generate_until_RES = [
+         " The average of $2.50 each is $",
+         " A robe takes 2 bolts of blue fiber and half",
+         " $50,000 in repairs.\n\nQuestion",
+         " He runs 1 sprint 3 times a week.",
+         " They feed each of her chickens three cups of mixed",
+         " The price of the glasses is $5, but",
+         " The total percentage of students who said they like to",
+         " Carla is downloading a 200 GB file. Normally",
+         " John drives for 3 hours at a speed of 60",
+         " Eliza sells 4 tickets to 5 friends so she",
+     ]
+     ROLLING_RES = [
+         -3603.6328125,
+         -19779.23974609375,
+         -8834.16455078125,
+         -27967.591796875,
+         -7636.794982910156,
+         -9491.93505859375,
+         -41043.4248046875,
+         -8397.689819335938,
+         -45969.47155761719,
+         -7158.90625,
+     ]
+     LM = HFLM(pretrained="EleutherAI/pythia-70m", device="cpu", dtype="float32")
+
+     def test_logliklihood(self) -> None:
+         res = self.LM.loglikelihood(self.MULTIPLE_CH)
+         _RES, _res = self.MULTIPLE_CH_RES, [r[0] for r in res]
+         # log samples to CI
+         dir_path = Path("test_logs")
+         dir_path.mkdir(parents=True, exist_ok=True)
+
+         file_path = dir_path / f"outputs_log_{self.version_minor}.txt"
+         file_path = file_path.resolve()
+         with open(file_path, "w") as f:
+             f.write("\n".join(str(x) for x in _res))
+         assert np.allclose(_res, _RES, atol=1e-2)
+         # check indices for Multiple Choice
+         argmax_RES, argmax_res = (
+             np.argmax(np.array(_RES).reshape(-1, 4), axis=1),
+             np.argmax(np.array(_res).reshape(-1, 4), axis=1),
+         )
+         assert (argmax_RES == argmax_res).all()
+
+     def test_generate_until(self) -> None:
+         res = self.LM.generate_until(self.generate_until)
+         assert res == self.generate_until_RES
+
+     def test_logliklihood_rolling(self) -> None:
+         res = self.LM.loglikelihood_rolling(self.ROLLING)
+         assert np.allclose(res, self.ROLLING_RES, atol=1e-1)
+
+     def test_toc_encode(self) -> None:
+         res = self.LM.tok_encode("foo bar")
+         assert res == [12110, 2534]
+
+     def test_toc_decode(self) -> None:
+         res = self.LM.tok_decode([12110, 2534])
+         assert res == "foo bar"
+
+     def test_batch_encode(self) -> None:
+         res = self.LM.tok_batch_encode(["foo bar", "bar foo"])[0].tolist()
+         assert res == [[12110, 2534], [2009, 17374]]
+
+     def test_model_generate(self) -> None:
+         context = self.LM.tok_batch_encode(["foo bar"])[0]
+         res = self.LM._model_generate(context, max_length=10, stop=["\n\n"])
+         res = self.LM.tok_decode(res[0])
+         assert res == "foo bar\n<bazhang>!info bar"
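
The `reshape(-1, 4)` step in `test_logliklihood` relies on ARC-Easy being a four-way multiple-choice task: the 40 reference loglikelihoods are 10 documents times 4 answer choices in request order, so an argmax within each group of four recovers the predicted choice index. A toy illustration with made-up scores:

    import numpy as np

    # 2 documents x 4 choices, flattened in request order (scores are made up)
    scores = np.array([-9.0, -3.5, -7.2, -8.1,   # doc 0: choice 1 scores highest
                       -4.0, -6.3, -2.9, -5.5])  # doc 1: choice 2 scores highest
    predicted = np.argmax(scores.reshape(-1, 4), axis=1)
    assert predicted.tolist() == [1, 2]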
lm-evaluation-harness/tests/models/test_neuralmagic.py ADDED
@@ -0,0 +1,61 @@
+ import pytest
+
+ import lm_eval.evaluator as evaluator
+ from lm_eval.api.registry import get_model
+
+
+ SPARSEML_MODELS_TASKS = [
+     # loglikelihood
+     ("facebook/opt-125m", "lambada_openai"),
+     # loglikelihood_rolling
+     ("hf-internal-testing/tiny-random-gpt2", "wikitext"),
+     # generate_until
+     ("mgoin/tiny-random-llama-2-quant", "gsm8k"),
+ ]
+
+ DEEPSPARSE_MODELS_TASKS = [
+     # loglikelihood
+     ("hf:mgoin/llama2.c-stories15M-quant-ds", "lambada_openai"),
+     # loglikelihood_rolling (not supported yet)
+     # ("hf:mgoin/llama2.c-stories15M-quant-ds", "wikitext"),
+     # generate_until
+     ("hf:mgoin/llama2.c-stories15M-quant-ds", "gsm8k"),
+ ]
+
+
+ @pytest.mark.parametrize("model_id,task", SPARSEML_MODELS_TASKS)
+ def test_sparseml_eval(model_id, task):
+     lm = get_model("sparseml").create_from_arg_string(
+         f"pretrained={model_id}",
+         {
+             "batch_size": 1,
+             "device": "cpu",
+             "dtype": "float32",
+         },
+     )
+
+     limit = 5
+     evaluator.simple_evaluate(
+         model=lm,
+         tasks=[task],
+         num_fewshot=0,
+         limit=limit,
+     )
+
+
+ @pytest.mark.parametrize("model_id,task", DEEPSPARSE_MODELS_TASKS)
+ def test_deepsparse_eval(model_id, task):
+     lm = get_model("deepsparse").create_from_arg_string(
+         f"pretrained={model_id}",
+         {
+             "batch_size": 1,
+         },
+     )
+
+     limit = 5
+     evaluator.simple_evaluate(
+         model=lm,
+         tasks=[task],
+         num_fewshot=0,
+         limit=limit,
+     )
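
`create_from_arg_string` mirrors the CLI's `--model_args` handling: a comma-separated `key=value` string is parsed into constructor kwargs and merged with the extra-options dict. A rough sketch of that parsing, assuming plain string values (the harness's own helper also coerces types):

    def parse_arg_string(arg_string: str) -> dict:
        """Turn "k1=v1,k2=v2" into {"k1": "v1", "k2": "v2"}."""
        args = {}
        for pair in filter(None, arg_string.split(",")):
            key, value = pair.split("=", 1)
            args[key.strip()] = value.strip()
        return args

    kwargs = parse_arg_string("pretrained=facebook/opt-125m")
    kwargs.update({"batch_size": 1, "device": "cpu", "dtype": "float32"})
    # kwargs now holds the model class's constructor arguments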
lm-evaluation-harness/tests/models/test_neuron_optimum.py ADDED
@@ -0,0 +1,26 @@
+ import pytest
+ import torch
+
+ from lm_eval.models.neuron_optimum import wrap_constant_batch_size
+
+
+ def test_wrap_constant_batch_size():
+     class Tester:
+         def __init__(self, batch_size):
+             self.batch_size = batch_size
+
+         @wrap_constant_batch_size
+         def test_constant_batch_size(self, inputs):
+             assert len(inputs) == self.batch_size
+             return inputs
+
+     batch_size_test = 8
+     for i in range(1, batch_size_test + 1):
+         tensor = torch.ones([i, 2, 2])
+         out = Tester(batch_size=batch_size_test).test_constant_batch_size(tensor)
+         torch.testing.assert_allclose(out, tensor)
+
+     with pytest.raises(ValueError):
+         Tester(batch_size=batch_size_test).test_constant_batch_size(
+             torch.ones([batch_size_test + 1, 2, 2])
+         )
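
The test pins down the decorator's contract: inputs with at most `batch_size` rows pass through unchanged, and anything larger raises `ValueError`. That contract fits accelerators compiled for a fixed batch shape (e.g. AWS Neuron), where smaller batches are padded up and the padding is sliced off the output. A minimal sketch of that padding idea, under those assumptions and not the harness's actual implementation:

    import torch

    def pad_to_constant_batch(fn):
        """Pad the batch dim up to self.batch_size, call fn, then trim the output."""
        def wrapper(self, inputs, **kwargs):
            n = len(inputs)
            if n > self.batch_size:
                raise ValueError(f"batch of {n} exceeds compiled batch size {self.batch_size}")
            if n < self.batch_size:
                # repeat the last row to fill the fixed batch shape
                pad = inputs[-1:].repeat(self.batch_size - n, *([1] * (inputs.dim() - 1)))
                inputs = torch.cat([inputs, pad], dim=0)
            return fn(self, inputs, **kwargs)[:n]  # drop the padded rows
        return wrapper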
lm-evaluation-harness/tests/models/test_openvino.py ADDED
@@ -0,0 +1,73 @@
 
+ import random
+ import tempfile
+
+ import pytest
+ from optimum.intel import OVModelForCausalLM
+ from transformers import AutoTokenizer
+
+ import lm_eval.evaluator as evaluator
+ from lm_eval.api.registry import get_model
+
+
+ SUPPORTED_ARCHITECTURES_TASKS = {
+     "facebook/opt-125m": "lambada_openai",
+     "hf-internal-testing/tiny-random-gpt2": "wikitext",
+ }
+
+
+ @pytest.mark.parametrize("model_id,task", SUPPORTED_ARCHITECTURES_TASKS.items())
+ def test_evaluator(model_id, task):
+     with tempfile.TemporaryDirectory() as tmpdirname:
+         model = OVModelForCausalLM.from_pretrained(
+             model_id, export=True, use_cache=True
+         )
+         model.save_pretrained(tmpdirname)
+         tokenizer = AutoTokenizer.from_pretrained(model_id)
+         tokenizer.save_pretrained(tmpdirname)
+
+         lm = get_model("openvino").create_from_arg_string(
+             f"pretrained={tmpdirname}",
+             {
+                 "batch_size": 1,
+                 "device": "cpu",
+             },
+         )
+
+         def ll_fn(reqs):
+             for ctx, cont in [req.args for req in reqs]:
+                 if len(ctx) == 0:
+                     continue
+                 # space convention
+                 assert ctx[-1] != " "
+                 assert cont[0] == " " or ctx[-1] == "\n"
+
+             res = []
+
+             random.seed(42)
+             for _ in reqs:
+                 res.append((-random.random(), False))
+
+             return res
+
+         def ll_perp_fn(reqs):
+             for (string,) in [req.args for req in reqs]:
+                 assert isinstance(string, str)
+
+             res = []
+             random.seed(42)
+             for _ in reqs:
+                 res.append(-random.random())
+
+             return res
+
+         lm.loglikelihood = ll_fn
+         lm.loglikelihood_rolling = ll_perp_fn
+
+         limit = 10
+         evaluator.simple_evaluate(
+             model=lm,
+             tasks=[task],
+             num_fewshot=0,
+             limit=limit,
+             bootstrap_iters=10,
+         )
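
Note the design choice here: `loglikelihood` and `loglikelihood_rolling` are monkey-patched with seeded stand-ins, so the test exercises OpenVINO export/loading and the evaluator's request plumbing without asserting anything about model quality. The stub's assertions also document the harness's whitespace convention, illustrated below with a hand-built request (the `Req` tuple is illustrative only):

    import random
    from collections import namedtuple

    Req = namedtuple("Req", "args")

    # Convention: the context must not end with a space; the continuation
    # carries the leading space (or the context ends with a newline).
    reqs = [Req(args=("Question: 2+2?\nAnswer:", " 4"))]
    for ctx, cont in (r.args for r in reqs):
        assert ctx[-1] != " "
        assert cont[0] == " " or ctx[-1] == "\n"

    random.seed(42)  # seeding makes the fake scores reproducible across runs
    fake_scores = [(-random.random(), False) for _ in reqs]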
lm-evaluation-harness/tests/models/test_vllm.py ADDED
@@ -0,0 +1,51 @@
+ from typing import List
+
+ import pytest
+ import torch
+
+ import lm_eval.tasks as tasks
+ from lm_eval.api.instance import Instance
+
+
+ task_manager = tasks.TaskManager()
+
+
+ @pytest.mark.skip(reason="requires CUDA")
+ class TEST_VLLM:
+     vllm = pytest.importorskip("vllm")
+     try:
+         from lm_eval.models.vllm_causallms import VLLM
+
+         LM = VLLM(pretrained="EleutherAI/pythia-70m")
+     except ModuleNotFoundError:
+         pass
+     torch.use_deterministic_algorithms(True)
+     task_list = task_manager.load_task_or_group(["arc_easy", "gsm8k", "wikitext"])
+     multiple_choice_task = task_list["arc_easy"]  # type: ignore
+     multiple_choice_task.build_all_requests(limit=10, rank=0, world_size=1)
+     MULTIPLE_CH: List[Instance] = multiple_choice_task.instances
+     generate_until_task = task_list["gsm8k"]  # type: ignore
+     generate_until_task._config.generation_kwargs["max_gen_toks"] = 10
+     generate_until_task.build_all_requests(limit=10, rank=0, world_size=1)
+     generate_until: List[Instance] = generate_until_task.instances
+     rolling_task = task_list["wikitext"]  # type: ignore
+     rolling_task.build_all_requests(limit=10, rank=0, world_size=1)
+     ROLLING: List[Instance] = rolling_task.instances
+
+     # TODO: make proper tests
+     def test_logliklihood(self) -> None:
+         res = self.LM.loglikelihood(self.MULTIPLE_CH)
+         assert len(res) == len(self.MULTIPLE_CH)
+         for x in res:
+             assert isinstance(x[0], float)
+
+     def test_generate_until(self) -> None:
+         res = self.LM.generate_until(self.generate_until)
+         assert len(res) == len(self.generate_until)
+         for x in res:
+             assert isinstance(x, str)
+
+     def test_logliklihood_rolling(self) -> None:
+         res = self.LM.loglikelihood_rolling(self.ROLLING)
+         for x in res:
+             assert isinstance(x, float)
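
Two guards keep this suite off CPU-only CI: `pytest.importorskip("vllm")` skips collection when vLLM is not installed, and the class-level marker skips it outright because vLLM needs CUDA. A minimal sketch of the same guard pattern:

    import pytest

    vllm = pytest.importorskip("vllm")  # skip this module's tests if vllm is absent

    @pytest.mark.skip(reason="requires CUDA")
    def test_gpu_only_path():
        ...  # would construct a vLLM engine here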
lm-evaluation-harness/tests/testdata/blimp_distractor_agreement_relational_noun-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ 8aab641bd5933f84f46a14f5c1208a3c855cace7e67b44abcd5aff8fec96717d
lm-evaluation-harness/tests/testdata/blimp_existential_there_subject_raising-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ 9b324b28ae3e1b5d49ecf4b7b2a16c7bbc8ff38d000cf216fab75df633da2084
lm-evaluation-harness/tests/testdata/blimp_expletive_it_object_raising-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"blimp_expletive_it_object_raising": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_expletive_it_object_raising": 0}}
lm-evaluation-harness/tests/testdata/crows_pairs_english_race_color-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"crows_pairs_english_race_color": {"likelihood_difference": 0.3322827903840805, "likelihood_difference_stderr": 0.01019838186372816, "pct_stereotype": 0.4822834645669291, "pct_stereotype_stderr": 0.022191835500120254}}, "versions": {"crows_pairs_english_race_color": 0}}
lm-evaluation-harness/tests/testdata/hellaswag-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"hellaswag": {"acc": 0.24965146385182235, "acc_norm": 0.24756024696275641, "acc_norm_stderr": 0.004307128573285236, "acc_stderr": 0.004319267432460666}}, "versions": {"hellaswag": 0}}
lm-evaluation-harness/tests/testdata/hendrycksTest-econometrics-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"hendrycksTest-econometrics": {"acc": 0.24561403508771928, "acc_norm": 0.24561403508771928, "acc_norm_stderr": 0.04049339297748142, "acc_stderr": 0.040493392977481425}}, "versions": {"hendrycksTest-econometrics": 0}}
lm-evaluation-harness/tests/testdata/hendrycksTest-us_foreign_policy-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"hendrycksTest-us_foreign_policy": {"acc": 0.2, "acc_norm": 0.24, "acc_norm_stderr": 0.04292346959909283, "acc_stderr": 0.040201512610368445}}, "versions": {"hendrycksTest-us_foreign_policy": 0}}
lm-evaluation-harness/tests/testdata/multirc-v1-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"multirc": {"acc": 0.046169989506820566, "acc_stderr": 0.006801377886208738}}, "versions": {"multirc": 1}}
lm-evaluation-harness/tests/testdata/pile_arxiv-v0-loglikelihood_rolling ADDED
@@ -0,0 +1 @@
+ 814f9954e44368559602c00f7e85fa3971acdfd0315f508ec7df6318a79c55ec
lm-evaluation-harness/tests/testdata/pile_philpapers-v1-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"pile_philpapers": {"bits_per_byte": 9.004690592465457e-06, "byte_perplexity": 1.0000062415953748, "word_perplexity": 1.0000409888564146}}, "versions": {"pile_philpapers": 1}}
lm-evaluation-harness/tests/testdata/pile_pubmed-central-v0-loglikelihood_rolling ADDED
@@ -0,0 +1 @@
+ 40b39d120d99a145690444e86acc3e3e24d41e6e0538a75e26929ad84926e5e0
lm-evaluation-harness/tests/testdata/pile_ubuntu-irc-v1-loglikelihood_rolling ADDED
@@ -0,0 +1 @@
+ 4eb69e314f0864ec8890e2323d7e76f8a8309692c4f090e2b41bf4be681a811d
lm-evaluation-harness/wandb/debug-cli.root.log ADDED
File without changes
lm-evaluation-harness/wandb/debug-internal.log ADDED
The diff for this file is too large to render. See raw diff
 
lm-evaluation-harness/wandb/debug.log ADDED
@@ -0,0 +1,36 @@
+ 2024-06-08 19:03:33,772 INFO MainThread:30255 [wandb_setup.py:_flush():76] Current SDK version is 0.17.1
+ 2024-06-08 19:03:33,772 INFO MainThread:30255 [wandb_setup.py:_flush():76] Configure stats pid to 30255
+ 2024-06-08 19:03:33,772 INFO MainThread:30255 [wandb_setup.py:_flush():76] Loading settings from /root/.config/wandb/settings
+ 2024-06-08 19:03:33,772 INFO MainThread:30255 [wandb_setup.py:_flush():76] Loading settings from /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/settings
+ 2024-06-08 19:03:33,773 INFO MainThread:30255 [wandb_setup.py:_flush():76] Loading settings from environment variables: {}
+ 2024-06-08 19:03:33,773 INFO MainThread:30255 [wandb_setup.py:_flush():76] Applying setup settings: {'_disable_service': False}
+ 2024-06-08 19:03:33,773 WARNING MainThread:30255 [wandb_setup.py:_flush():76] Could not find program at -m lm_eval.__main__
+ 2024-06-08 19:03:33,773 INFO MainThread:30255 [wandb_setup.py:_flush():76] Inferring run settings from compute environment: {'program_relpath': None, 'program': '-m lm_eval.__main__'}
+ 2024-06-08 19:03:33,773 INFO MainThread:30255 [wandb_setup.py:_flush():76] Applying login settings: {}
+ 2024-06-08 19:03:33,773 INFO MainThread:30255 [wandb_init.py:_log_setup():520] Logging user logs to /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240608_190333-82mnef5m/logs/debug.log
+ 2024-06-08 19:03:33,773 INFO MainThread:30255 [wandb_init.py:_log_setup():521] Logging internal logs to /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240608_190333-82mnef5m/logs/debug-internal.log
+ 2024-06-08 19:03:33,773 INFO MainThread:30255 [wandb_init.py:init():560] calling init triggers
+ 2024-06-08 19:03:33,773 INFO MainThread:30255 [wandb_init.py:init():567] wandb.init called with sweep_config: {}
+ config: {}
+ 2024-06-08 19:03:33,773 INFO MainThread:30255 [wandb_init.py:init():610] starting backend
+ 2024-06-08 19:03:33,773 INFO MainThread:30255 [wandb_init.py:init():614] setting up manager
+ 2024-06-08 19:03:33,776 INFO MainThread:30255 [backend.py:_multiprocessing_setup():105] multiprocessing start_methods=fork,spawn,forkserver, using: spawn
+ 2024-06-08 19:03:33,777 INFO MainThread:30255 [wandb_init.py:init():622] backend started and connected
+ 2024-06-08 19:03:33,781 INFO MainThread:30255 [wandb_init.py:init():711] updated telemetry
+ 2024-06-08 19:03:33,790 INFO MainThread:30255 [wandb_init.py:init():744] communicating run to backend with 90.0 second timeout
+ 2024-06-08 19:03:33,992 INFO MainThread:30255 [wandb_run.py:_on_init():2402] communicating current version
+ 2024-06-08 19:03:34,052 INFO MainThread:30255 [wandb_run.py:_on_init():2411] got version response
+ 2024-06-08 19:03:34,052 INFO MainThread:30255 [wandb_init.py:init():795] starting run threads in backend
+ 2024-06-08 19:03:34,350 INFO MainThread:30255 [wandb_run.py:_console_start():2380] atexit reg
+ 2024-06-08 19:03:34,350 INFO MainThread:30255 [wandb_run.py:_redirect():2235] redirect: wrap_raw
+ 2024-06-08 19:03:34,351 INFO MainThread:30255 [wandb_run.py:_redirect():2300] Wrapping output streams.
+ 2024-06-08 19:03:34,351 INFO MainThread:30255 [wandb_run.py:_redirect():2325] Redirects installed.
+ 2024-06-08 19:03:34,353 INFO MainThread:30255 [wandb_init.py:init():838] run started, returning control to user process
+ 2024-06-08 20:15:51,241 INFO MainThread:30255 [wandb_run.py:_config_callback():1382] config_cb None None {'task_configs': {'arc_easy': {'task': 'arc_easy', 'group': ['ai2_arc'], 'dataset_path': 'allenai/ai2_arc', 'dataset_name': 'ARC-Easy', 'training_split': 'train', 'validation_split': 'validation', 'test_split': 'test', 'doc_to_text': 'Question: {{question}}\nAnswer:', 'doc_to_target': '{{choices.label.index(answerKey)}}', 'doc_to_choice': '{{choices.text}}', 'description': '', 'target_delimiter': ' ', 'fewshot_delimiter': '\n\n', 'num_fewshot': 0, 'metric_list': [{'metric': 'acc', 'aggregation': 'mean', 'higher_is_better': True}, {'metric': 'acc_norm', 'aggregation': 'mean', 'higher_is_better': True}], 'output_type': 'multiple_choice', 'repeats': 1, 'should_decontaminate': True, 'doc_to_decontamination_query': 'Question: {{question}}\nAnswer:', 'metadata': {'version': 1.0}}, 'boolq': {'task': 'boolq', 'group': ['super-glue-lm-eval-v1'], 'dataset_path': 'super_glue', 'dataset_name': 'boolq', 'training_split': 'train', 'validation_split': 'validation', 'doc_to_text': '{{passage}}\nQuestion: {{question}}?\nAnswer:', 'doc_to_target': 'label', 'doc_to_choice': ['no', 'yes'], 'description': '', 'target_delimiter': ' ', 'fewshot_delimiter': '\n\n', 'num_fewshot': 0, 'metric_list': [{'metric': 'acc'}], 'output_type': 'multiple_choice', 'repeats': 1, 'should_decontaminate': True, 'doc_to_decontamination_query': 'passage', 'metadata': {'version': 2.0}}, 'copa': {'task': 'copa', 'group': ['super-glue-lm-eval-v1'], 'dataset_path': 'super_glue', 'dataset_name': 'copa', 'training_split': 'train', 'validation_split': 'validation', 'doc_to_text': 'def doc_to_text(doc):\n # Drop the period\n connector = {\n "cause": "because",\n "effect": "therefore",\n }[doc["question"]]\n return doc["premise"].strip()[:-1] + f" {connector}"\n', 'doc_to_target': 'def doc_to_target(doc):\n correct_choice = doc["choice1"] if doc["label"] == 0 else doc["choice2"]\n # Connect the sentences\n return " " + convert_choice(correct_choice)\n', 'doc_to_choice': 'def doc_to_choice(doc):\n return [" " + convert_choice(doc["choice1"]), " " + convert_choice(doc["choice2"])]\n', 'description': '', 'target_delimiter': ' ', 'fewshot_delimiter': '\n\n', 'num_fewshot': 0, 'metric_list': [{'metric': 'acc'}], 'output_type': 'multiple_choice', 'repeats': 1, 'should_decontaminate': False, 'metadata': {'version': 1.0}}, 'indic_arc_challenge_hi': {'task': 'indic_arc_challenge_hi', 'group': 'Cognitive-Lab/Indic-ARC-Challenge', 'dataset_path': 'Cognitive-Lab/Indic-ARC-Challenge', 'dataset_name': 'hi', 'test_split': 'test', 'doc_to_text': 'Question: {{translated_question}}\nAnswer:', 'doc_to_target': '{{translated_choices.label.index(answerKey)}}', 'doc_to_choice': '{{translated_choices.text}}', 'description': '', 'target_delimiter': ' ', 'fewshot_delimiter': '\n\n', 'num_fewshot': 0, 'metric_list': [{'metric': 'acc', 'aggregation': 'mean', 'higher_is_better': True}], 'output_type': 'multiple_choice', 'repeats': 1, 'should_decontaminate': True, 'doc_to_decontamination_query': 'Question: {{translated_question}}\nAnswer:', 'metadata': {'version': 1.0}}, 'indic_arc_easy_hi': {'task': 'indic_arc_easy_hi', 'group': 'Cognitive-Lab/Indic-ARC-Easy', 'dataset_path': 'Cognitive-Lab/Indic-ARC-Easy', 'dataset_name': 'hi', 'test_split': 'test', 'doc_to_text': 'Question: {{translated_question}}\nAnswer:', 'doc_to_target': '{{translated_choices.label.index(answerKey)}}', 'doc_to_choice': '{{translated_choices.text}}', 'description': '', 'target_delimiter': ' ', 'fewshot_delimiter': '\n\n', 'num_fewshot': 0, 'metric_list': [{'metric': 'acc', 'aggregation': 'mean', 'higher_is_better': True}], 'output_type': 'multiple_choice', 'repeats': 1, 'should_decontaminate': True, 'doc_to_decontamination_query': 'Question: {{translated_question}}\nAnswer:', 'metadata': {'version': 1.0}}, 'indic_boolq_hi': {'task': 'indic_boolq_hi', 'group': 'Cognitive-Lab/Indic-BoolQ', 'dataset_path': 'Cognitive-Lab/Indic-BoolQ', 'dataset_name': 'hi', 'validation_split': 'validation', 'doc_to_text': 'Passage: {translated_passage}\nQuestion: {translated_question.strip()}\nAnswer:', 'doc_to_target': 'answer', 'doc_to_choice': ['true', 'false'], 'description': '', 'target_delimiter': ' ', 'fewshot_delimiter': '\n\n', 'num_fewshot': 0, 'metric_list': [{'metric': 'acc', 'aggregation': 'mean', 'higher_is_better': True}], 'output_type': 'multiple_choice', 'repeats': 1, 'should_decontaminate': False, 'metadata': {'version': 1.0}}, 'mrpc': {'task': 'mrpc', 'group': 'glue', 'dataset_path': 'glue', 'dataset_name': 'mrpc', 'training_split': 'train', 'validation_split': 'validation', 'doc_to_text': 'Sentence 1: {{sentence1}}\nSentence 2: {{sentence2}}\nQuestion: Do both sentences mean the same thing?\nAnswer:', 'doc_to_target': 'label', 'doc_to_choice': ['no', 'yes'], 'description': '', 'target_delimiter': ' ', 'fewshot_delimiter': '\n\n', 'num_fewshot': 0, 'metric_list': [{'metric': 'acc'}, {'metric': 'f1'}], 'output_type': 'multiple_choice', 'repeats': 1, 'should_decontaminate': False, 'metadata': {'version': 1.0}}, 'piqa': {'task': 'piqa', 'dataset_path': 'piqa', 'training_split': 'train', 'validation_split': 'validation', 'doc_to_text': 'Question: {{goal}}\nAnswer:', 'doc_to_target': 'label', 'doc_to_choice': '{{[sol1, sol2]}}', 'description': '', 'target_delimiter': ' ', 'fewshot_delimiter': '\n\n', 'num_fewshot': 0, 'metric_list': [{'metric': 'acc', 'aggregation': 'mean', 'higher_is_better': True}, {'metric': 'acc_norm', 'aggregation': 'mean', 'higher_is_better': True}], 'output_type': 'multiple_choice', 'repeats': 1, 'should_decontaminate': True, 'doc_to_decontamination_query': 'goal', 'metadata': {'version': 1.0}}, 'sst2': {'task': 'sst2', 'group': 'glue', 'dataset_path': 'glue', 'dataset_name': 'sst2', 'training_split': 'train', 'validation_split': 'validation', 'doc_to_text': '{{sentence}}\nQuestion: Is this sentence positive or negative?\nAnswer:', 'doc_to_target': 'label', 'doc_to_choice': ['negative', 'positive'], 'description': '', 'target_delimiter': ' ', 'fewshot_delimiter': '\n\n', 'num_fewshot': 0, 'metric_list': [{'metric': 'acc'}], 'output_type': 'multiple_choice', 'repeats': 1, 'should_decontaminate': False, 'metadata': {'version': 1.0}}, 'winogrande': {'task': 'winogrande', 'dataset_path': 'winogrande', 'dataset_name': 'winogrande_xl', 'training_split': 'train', 'validation_split': 'validation', 'doc_to_text': 'def doc_to_text(doc):\n answer_to_num = {"1": 0, "2": 1}\n return answer_to_num[doc["answer"]]\n', 'doc_to_target': 'def doc_to_target(doc):\n idx = doc["sentence"].index("_") + 1\n return doc["sentence"][idx:].strip()\n', 'doc_to_choice': 'def doc_to_choice(doc):\n idx = doc["sentence"].index("_")\n options = [doc["option1"], doc["option2"]]\n return [doc["sentence"][:idx] + opt for opt in options]\n', 'description': '', 'target_delimiter': ' ', 'fewshot_delimiter': '\n\n', 'num_fewshot': 0, 'metric_list': [{'metric': 'acc', 'aggregation': 'mean', 'higher_is_better': True}], 'output_type': 'multiple_choice', 'repeats': 1, 'should_decontaminate': True, 'doc_to_decontamination_query': 'sentence', 'metadata': {'version': 1.0}}}, 'cli_configs': {'model': 'hf', 'model_args': 'pretrained=/mnt/weka/peacock/experiments/llama/eval/checkpoint-enhibn-updated/llamav2-3b/hf/global_step240000,tokenizer=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k_hf/ConvertedTokenizer', 'batch_size': 'auto', 'batch_sizes': [64], 'device': None, 'use_cache': None, 'limit': None, 'bootstrap_iters': 100000, 'gen_kwargs': None}}
+ 2024-06-08 20:15:51,834 INFO MainThread:30255 [wandb_run.py:_finish():2109] finishing run smlgenai/engl-hi-eval/82mnef5m
+ 2024-06-08 20:15:51,834 INFO MainThread:30255 [wandb_run.py:_atexit_cleanup():2349] got exitcode: 0
+ 2024-06-08 20:15:51,835 INFO MainThread:30255 [wandb_run.py:_restore():2332] restore
+ 2024-06-08 20:15:51,835 INFO MainThread:30255 [wandb_run.py:_restore():2338] restore done
+ 2024-06-08 20:15:57,740 INFO MainThread:30255 [wandb_run.py:_footer_history_summary_info():4008] rendering history
+ 2024-06-08 20:15:57,741 INFO MainThread:30255 [wandb_run.py:_footer_history_summary_info():4040] rendering summary
+ 2024-06-08 20:15:57,748 INFO MainThread:30255 [wandb_run.py:_footer_sync_info():3967] logging synced files
lm-evaluation-harness/wandb/run-20240523_130326-1oo0voi6/files/config.yaml ADDED
@@ -0,0 +1,43 @@
+ wandb_version: 1
+
+ _wandb:
+   desc: null
+   value:
+     python_version: 3.10.12
+     cli_version: 0.17.0
+     framework: huggingface
+     huggingface_version: 4.41.1
+     is_jupyter_run: false
+     is_kaggle_kernel: false
+     start_time: 1716469406
+     t:
+       1:
+       - 1
+       - 5
+       - 11
+       - 49
+       - 51
+       - 53
+       - 55
+       - 71
+       - 98
+       - 100
+       2:
+       - 1
+       - 5
+       - 11
+       - 49
+       - 51
+       - 53
+       - 55
+       - 71
+       - 98
+       - 100
+       3:
+       - 23
+       4: 3.10.12
+       5: 0.17.0
+       6: 4.41.1
+       8:
+       - 5
+       13: linux-x86_64
lm-evaluation-harness/wandb/run-20240523_130326-1oo0voi6/files/output.log ADDED
@@ -0,0 +1,34 @@
+
+ 2024-05-23:13:03:27,131 INFO [__main__.py:251] Verbosity set to INFO
+ 2024-05-23:13:03:35,583 INFO [__main__.py:335] Selected Tasks: ['arc_easy', 'hellaswag', 'mrpc', 'openbookqa', 'sst2', 'winogrande']
+ 2024-05-23:13:03:35,584 INFO [evaluator.py:131] Setting random seed to 0 | Setting numpy seed to 1234 | Setting torch manual seed to 1234
+ 2024-05-23:13:03:35,584 INFO [evaluator.py:177] Initializing hf model, with arguments: {'pretrained': '/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step10000'}
+ 2024-05-23:13:03:37,878 INFO [huggingface.py:164] Using device 'cuda'
+ Traceback (most recent call last):
+   File "/usr/lib/python3.10/runpy.py", line 196, in _run_module_as_main
+     return _run_code(code, main_globals, None,
+   File "/usr/lib/python3.10/runpy.py", line 86, in _run_code
+     exec(code, run_globals)
+   File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 417, in <module>
+     cli_evaluate()
+   File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 341, in cli_evaluate
+     results = evaluator.simple_evaluate(
+   File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/utils.py", line 288, in _wrapper
+     return fn(*args, **kwargs)
+   File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/evaluator.py", line 180, in simple_evaluate
+     lm = lm_eval.api.registry.get_model(model).create_from_arg_string(
+   File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/api/model.py", line 134, in create_from_arg_string
+     return cls(**args, **args2)
+   File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 190, in __init__
+     self._get_config(
+   File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 471, in _get_config
+     self._config = transformers.AutoConfig.from_pretrained(
+   File "/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py", line 934, in from_pretrained
+     config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs)
+   File "/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 632, in get_config_dict
+     config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs)
+   File "/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 689, in _get_config_dict
+     resolved_config_file = cached_file(
+   File "/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py", line 370, in cached_file
+     raise EnvironmentError(
+ OSError: /mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step10000 does not appear to have a file named config.json. Checkout 'https://huggingface.co//mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step10000/tree/main' for available files.
lm-evaluation-harness/wandb/run-20240523_130326-1oo0voi6/files/requirements.txt ADDED
@@ -0,0 +1,155 @@
+ DataProperty==1.0.1
+ GitPython==3.1.43
+ Jinja2==3.1.4
+ Markdown==3.6
+ MarkupSafe==2.1.5
+ Pillow-SIMD==7.0.0.post3
+ PyYAML==6.0
+ Werkzeug==3.0.3
+ absl-py==2.1.0
+ accelerate==0.30.1
+ aiohttp==3.9.5
+ aiosignal==1.3.1
+ async-timeout==4.0.3
+ attrs==23.2.0
+ av==9.2.0
+ cachetools==5.3.3
+ certifi==2024.2.2
+ cffi==1.15.1
+ cfgv==3.4.0
+ chardet==5.2.0
+ charset-normalizer==3.3.2
+ click==8.1.7
+ cmake==3.29.2
+ colorama==0.4.6
+ datasets==2.19.1
+ deepspeed==0.12.4+hpu.synapse.v1.15.1
+ dill==0.3.8
+ distlib==0.3.8
+ docker-pycreds==0.4.0
+ einops==0.8.0
+ evaluate==0.4.2
+ exceptiongroup==1.2.1
+ expecttest==0.2.1
+ filelock==3.14.0
+ frozenlist==1.4.1
+ fsspec==2024.3.1
+ gitdb==4.0.11
+ google-auth-oauthlib==0.4.6
+ google-auth==2.29.0
+ grpcio==1.63.0
+ habana-media-loader==1.15.1.15
+ habana-pyhlml==1.15.1.15
+ habana-torch-dataloader==1.15.1.15
+ habana-torch-plugin==1.15.1.15
+ habana_gpu_migration==1.15.1.15
+ habana_quantization_toolkit==1.15.1.15
+ hjson==3.1.0
+ huggingface-hub==0.23.1
+ identify==2.5.36
+ idna==3.7
+ iniconfig==2.0.0
+ joblib==1.4.2
+ jsonlines==4.0.0
+ lightning-habana==1.4.0
+ lightning-utilities==0.11.2
+ lightning==2.2.0.post0
+ lm_eval==0.4.2
+ lm_eval==0.4.2
+ lm_eval==0.4.2
+ lxml==5.2.2
+ mbstrdecoder==1.1.3
+ more-itertools==10.2.0
+ mpi4py==3.1.4
+ mpmath==1.3.0
+ multidict==6.0.5
+ multiprocess==0.70.16
+ networkx==3.3
+ ninja==1.11.1.1
+ nltk==3.8.1
+ nodeenv==1.8.0
+ numexpr==2.10.0
+ numpy==1.23.5
+ oauthlib==3.2.2
+ packaging==24.0
+ pandas==2.0.1
+ pathspec==0.12.1
+ pathvalidate==3.2.0
+ peft==0.11.1
+ perfetto==0.7.0
+ pillow==10.3.0
+ pip==22.0.2
+ pip==23.3.1
+ platformdirs==4.2.1
+ pluggy==1.5.0
+ portalocker==2.8.2
+ pre-commit==3.3.3
+ pretty-errors==1.2.25
+ protobuf==3.20.3
+ psutil==5.9.8
+ py-cpuinfo==9.0.0
+ pyarrow-hotfix==0.6
+ pyarrow==16.1.0
+ pyasn1==0.6.0
+ pyasn1_modules==0.4.0
+ pybind11==2.10.4
+ pycparser==2.22
+ pydantic==1.10.13
+ pynvml==8.0.4
+ pytablewriter==1.2.0
+ pytest==8.2.0
+ python-dateutil==2.9.0.post0
+ pytorch-lightning==2.2.4
+ pytz==2024.1
+ regex==2023.5.5
+ requests-oauthlib==2.0.0
+ requests==2.31.0
+ rouge_score==0.1.2
+ rsa==4.9
+ sacrebleu==2.4.2
+ safetensors==0.4.3
+ scikit-learn==1.5.0
+ scipy==1.13.1
+ sentencepiece==0.2.0
+ sentry-sdk==2.3.0
+ setproctitle==1.3.3
+ setuptools==59.6.0
+ setuptools==69.5.1
+ six==1.16.0
+ smmap==5.0.1
+ sqlitedict==2.1.0
+ symengine==0.11.0
+ sympy==1.12
+ tabledata==1.3.3
+ tabulate==0.9.0
+ tcolorpy==0.1.6
+ tdqm==0.0.1
+ tensorboard-data-server==0.6.1
+ tensorboard-plugin-wit==1.8.1
+ tensorboard==2.11.2
+ threadpoolctl==3.5.0
+ tokenizers==0.19.1
+ tomli==2.0.1
+ torch==2.2.0a0+git8964477
+ torch_tb_profiler==0.4.0
+ torchaudio==2.2.0+08901ad
+ torchdata==0.7.1+5e6f7b7
+ torchmetrics==1.4.0
+ torchtext==0.17.0+400da5c
+ torchvision==0.17.0+b2383d4
+ tqdm-multiprocess==0.0.11
+ tqdm==4.66.4
+ transformers==4.41.1
+ typepy==1.3.2
+ typing_extensions==4.11.0
+ tzdata==2024.1
+ urllib3==1.26.18
+ virtualenv==20.26.1
+ wandb==0.17.0
+ wheel==0.37.1
+ wheel==0.43.0
+ word2number==1.1
+ xxhash==3.4.1
+ yamllint==1.35.1
+ yarl==1.9.4
+ zstandard==0.22.0
lm-evaluation-harness/wandb/run-20240523_130326-1oo0voi6/files/wandb-metadata.json ADDED
@@ -0,0 +1,850 @@
+ {
+     "os": "Linux-5.15.0-92-generic-x86_64-with-glibc2.35",
+     "python": "3.10.12",
+     "heartbeatAt": "2024-05-23T13:03:26.924960",
+     "startedAt": "2024-05-23T13:03:26.400431",
+     "docker": null,
+     "cuda": null,
+     "args": [
+         "--model",
+         "hf",
+         "--model_args",
+         "pretrained=/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step10000",
+         "--tasks",
+         "hellaswag,arc_easy,openbookqa,winogrande,sst2,mrpc",
+         "--batch_size",
+         "auto",
+         "--wandb_args",
+         "project=bharatgpt,group=trial_expt_2"
+     ],
+     "state": "running",
+     "program": "-m lm_eval.__main__",
+     "codePathLocal": null,
+     "git": {
+         "remote": "https://github.com/EleutherAI/lm-evaluation-harness",
+         "commit": null
+     },
+     "email": null,
+     "root": "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness",
+     "host": "peacock-evaluation-worker-0",
+     "username": "root",
+     "executable": "/usr/bin/python3",
+     "cpu_count": 80,
+     "cpu_count_logical": 160,
+     "cpu_freq": {
+         "current": 2332.72725,
+         "min": 800.0,
+         "max": 3400.0
+     },
+     "cpu_freq_per_core": [
+         {"current": 3400.0, "min": 800.0, "max": 3400.0},
+         {"current": 3400.0, "min": 800.0, "max": 3400.0},
+         {"current": 2300.0, "min": 800.0, "max": 3400.0},
The rest of this file's diff (the remaining per-core frequency entries, nearly all {"current": 2300.0, "min": 800.0, "max": 3400.0}) is too large to render and is truncated in this view. See raw diff
823
+ "max": 3400.0
824
+ },
825
+ {
826
+ "current": 2300.0,
827
+ "min": 800.0,
828
+ "max": 3400.0
829
+ },
830
+ {
831
+ "current": 2300.0,
832
+ "min": 800.0,
833
+ "max": 3400.0
834
+ },
835
+ {
836
+ "current": 2300.0,
837
+ "min": 800.0,
838
+ "max": 3400.0
839
+ }
840
+ ],
841
+ "disk": {
842
+ "/": {
843
+ "total": 877.6341285705566,
844
+ "used": 211.61690521240234
845
+ }
846
+ },
847
+ "memory": {
848
+ "total": 1007.4379539489746
849
+ }
850
+ }
lm-evaluation-harness/wandb/run-20240523_130326-1oo0voi6/files/wandb-summary.json ADDED
@@ -0,0 +1 @@
+ {"_wandb": {"runtime": 11}}
lm-evaluation-harness/wandb/run-20240523_130326-1oo0voi6/logs/debug-internal.log ADDED
@@ -0,0 +1,183 @@
+ 2024-05-23 13:03:26,421 INFO StreamThr :1072 [internal.py:wandb_internal():85] W&B internal server running at pid: 1072, started at: 2024-05-23 13:03:26.419743
+ 2024-05-23 13:03:26,426 DEBUG HandlerThread:1072 [handler.py:handle_request():158] handle_request: status
+ 2024-05-23 13:03:26,427 INFO WriterThread:1072 [datastore.py:open_for_write():87] open: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_130326-1oo0voi6/run-1oo0voi6.wandb
+ 2024-05-23 13:03:26,429 DEBUG SenderThread:1072 [sender.py:send():378] send: header
+ 2024-05-23 13:03:26,433 DEBUG SenderThread:1072 [sender.py:send():378] send: run
+ 2024-05-23 13:03:26,724 INFO SenderThread:1072 [dir_watcher.py:__init__():211] watching files in: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_130326-1oo0voi6/files
+ 2024-05-23 13:03:26,725 INFO SenderThread:1072 [sender.py:_start_run_threads():1123] run started: 1oo0voi6 with start time 1716469406.420894
+ 2024-05-23 13:03:26,726 DEBUG HandlerThread:1072 [handler.py:handle_request():158] handle_request: check_version
+ 2024-05-23 13:03:26,726 DEBUG SenderThread:1072 [sender.py:send_request():405] send_request: check_version
+ 2024-05-23 13:03:26,850 DEBUG HandlerThread:1072 [handler.py:handle_request():158] handle_request: run_start
+ 2024-05-23 13:03:26,852 DEBUG HandlerThread:1072 [system_info.py:__init__():26] System info init
+ 2024-05-23 13:03:26,852 DEBUG HandlerThread:1072 [system_info.py:__init__():41] System info init done
+ 2024-05-23 13:03:26,852 INFO HandlerThread:1072 [system_monitor.py:start():194] Starting system monitor
+ 2024-05-23 13:03:26,852 INFO SystemMonitor:1072 [system_monitor.py:_start():158] Starting system asset monitoring threads
+ 2024-05-23 13:03:26,853 INFO HandlerThread:1072 [system_monitor.py:probe():214] Collecting system info
+ 2024-05-23 13:03:26,859 INFO SystemMonitor:1072 [interfaces.py:start():188] Started cpu monitoring
+ 2024-05-23 13:03:26,860 INFO SystemMonitor:1072 [interfaces.py:start():188] Started disk monitoring
+ 2024-05-23 13:03:26,860 INFO SystemMonitor:1072 [interfaces.py:start():188] Started memory monitoring
+ 2024-05-23 13:03:26,860 INFO SystemMonitor:1072 [interfaces.py:start():188] Started network monitoring
+ 2024-05-23 13:03:26,924 DEBUG HandlerThread:1072 [system_info.py:probe():150] Probing system
+ 2024-05-23 13:03:26,928 DEBUG HandlerThread:1072 [system_info.py:_probe_git():135] Probing git
+ 2024-05-23 13:03:26,938 ERROR HandlerThread:1072 [gitlib.py:root():92] git root error: Cmd('git') failed due to: exit code(128)
+ cmdline: git rev-parse --show-toplevel
+ stderr: 'fatal: detected dubious ownership in repository at '/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness'
+ To add an exception for this directory, call:
+
+ git config --global --add safe.directory /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness'
+ 2024-05-23 13:03:26,938 DEBUG HandlerThread:1072 [system_info.py:_probe_git():143] Probing git done
+ 2024-05-23 13:03:26,938 DEBUG HandlerThread:1072 [system_info.py:probe():198] Probing system done
+ 2024-05-23 13:03:26,938 DEBUG HandlerThread:1072 [system_monitor.py:probe():223] {'os': 'Linux-5.15.0-92-generic-x86_64-with-glibc2.35', 'python': '3.10.12', 'heartbeatAt': '2024-05-23T13:03:26.924960', 'startedAt': '2024-05-23T13:03:26.400431', 'docker': None, 'cuda': None, 'args': ('--model', 'hf', '--model_args', 'pretrained=/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step10000', '--tasks', 'hellaswag,arc_easy,openbookqa,winogrande,sst2,mrpc', '--batch_size', 'auto', '--wandb_args', 'project=bharatgpt,group=trial_expt_2'), 'state': 'running', 'program': '-m lm_eval.__main__', 'codePathLocal': None, 'git': {'remote': 'https://github.com/EleutherAI/lm-evaluation-harness', 'commit': None}, 'email': None, 'root': '/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness', 'host': 'peacock-evaluation-worker-0', 'username': 'root', 'executable': '/usr/bin/python3', 'cpu_count': 80, 'cpu_count_logical': 160, 'cpu_freq': {'current': 2332.72725, 'min': 800.0, 'max': 3400.0}, 'cpu_freq_per_core': [{'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3198.822, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 
2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 
2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}], 'disk': {'/': {'total': 877.6341285705566, 'used': 211.61690521240234}}, 'memory': {'total': 1007.4379539489746}}
+ 2024-05-23 13:03:26,938 INFO HandlerThread:1072 [system_monitor.py:probe():224] Finished collecting system info
+ 2024-05-23 13:03:26,938 INFO HandlerThread:1072 [system_monitor.py:probe():227] Publishing system info
+ 2024-05-23 13:03:26,941 INFO HandlerThread:1072 [system_monitor.py:probe():229] Finished publishing system info
+ 2024-05-23 13:03:26,946 DEBUG SenderThread:1072 [sender.py:send():378] send: files
+ 2024-05-23 13:03:26,946 INFO SenderThread:1072 [sender.py:_save_file():1389] saving file wandb-metadata.json with policy now
+ 2024-05-23 13:03:27,125 DEBUG HandlerThread:1072 [handler.py:handle_request():158] handle_request: python_packages
+ 2024-05-23 13:03:27,125 DEBUG SenderThread:1072 [sender.py:send_request():405] send_request: python_packages
+ 2024-05-23 13:03:27,126 DEBUG HandlerThread:1072 [handler.py:handle_request():158] handle_request: stop_status
+ 2024-05-23 13:03:27,127 DEBUG SenderThread:1072 [sender.py:send_request():405] send_request: stop_status
+ 2024-05-23 13:03:27,218 DEBUG SenderThread:1072 [sender.py:send():378] send: telemetry
+ 2024-05-23 13:03:27,522 INFO wandb-upload_0:1072 [upload_job.py:push():130] Uploaded file /tmp/tmpzbrvci8cwandb/r70fz28y-wandb-metadata.json
+ 2024-05-23 13:03:27,725 INFO Thread-12 :1072 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_130326-1oo0voi6/files/wandb-metadata.json
+ 2024-05-23 13:03:27,725 INFO Thread-12 :1072 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_130326-1oo0voi6/files/output.log
+ 2024-05-23 13:03:27,726 INFO Thread-12 :1072 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_130326-1oo0voi6/files/requirements.txt
+ 2024-05-23 13:03:29,725 INFO Thread-12 :1072 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_130326-1oo0voi6/files/output.log
+ 2024-05-23 13:03:32,223 DEBUG HandlerThread:1072 [handler.py:handle_request():158] handle_request: status_report
+ 2024-05-23 13:03:37,585 DEBUG HandlerThread:1072 [handler.py:handle_request():158] handle_request: status_report
+ 2024-05-23 13:03:37,732 INFO Thread-12 :1072 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_130326-1oo0voi6/files/output.log
+ 2024-05-23 13:03:37,887 DEBUG SenderThread:1072 [sender.py:send():378] send: exit
+ 2024-05-23 13:03:37,887 INFO SenderThread:1072 [sender.py:send_exit():585] handling exit code: 1
+ 2024-05-23 13:03:37,887 INFO SenderThread:1072 [sender.py:send_exit():587] handling runtime: 11
+ 2024-05-23 13:03:37,889 INFO SenderThread:1072 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end
+ 2024-05-23 13:03:37,889 INFO SenderThread:1072 [sender.py:send_exit():593] send defer
+ 2024-05-23 13:03:37,889 DEBUG HandlerThread:1072 [handler.py:handle_request():158] handle_request: defer
+ 2024-05-23 13:03:37,889 INFO HandlerThread:1072 [handler.py:handle_request_defer():184] handle defer: 0
+ 2024-05-23 13:03:37,890 DEBUG SenderThread:1072 [sender.py:send_request():405] send_request: defer
+ 2024-05-23 13:03:37,890 INFO SenderThread:1072 [sender.py:send_request_defer():609] handle sender defer: 0
+ 2024-05-23 13:03:37,890 INFO SenderThread:1072 [sender.py:transition_state():613] send defer: 1
+ 2024-05-23 13:03:37,890 DEBUG HandlerThread:1072 [handler.py:handle_request():158] handle_request: defer
+ 2024-05-23 13:03:37,890 INFO HandlerThread:1072 [handler.py:handle_request_defer():184] handle defer: 1
+ 2024-05-23 13:03:37,890 DEBUG SenderThread:1072 [sender.py:send_request():405] send_request: defer
+ 2024-05-23 13:03:37,890 INFO SenderThread:1072 [sender.py:send_request_defer():609] handle sender defer: 1
+ 2024-05-23 13:03:37,890 INFO SenderThread:1072 [sender.py:transition_state():613] send defer: 2
+ 2024-05-23 13:03:37,890 DEBUG HandlerThread:1072 [handler.py:handle_request():158] handle_request: defer
+ 2024-05-23 13:03:37,890 INFO HandlerThread:1072 [handler.py:handle_request_defer():184] handle defer: 2
+ 2024-05-23 13:03:37,890 INFO HandlerThread:1072 [system_monitor.py:finish():203] Stopping system monitor
+ 2024-05-23 13:03:37,890 DEBUG SystemMonitor:1072 [system_monitor.py:_start():172] Starting system metrics aggregation loop
+ 2024-05-23 13:03:37,890 DEBUG SystemMonitor:1072 [system_monitor.py:_start():179] Finished system metrics aggregation loop
+ 2024-05-23 13:03:37,891 DEBUG SystemMonitor:1072 [system_monitor.py:_start():183] Publishing last batch of metrics
+ 2024-05-23 13:03:37,893 INFO HandlerThread:1072 [interfaces.py:finish():200] Joined cpu monitor
+ 2024-05-23 13:03:37,893 INFO HandlerThread:1072 [interfaces.py:finish():200] Joined disk monitor
+ 2024-05-23 13:03:37,893 INFO HandlerThread:1072 [interfaces.py:finish():200] Joined memory monitor
+ 2024-05-23 13:03:37,894 INFO HandlerThread:1072 [interfaces.py:finish():200] Joined network monitor
+ 2024-05-23 13:03:37,894 DEBUG SenderThread:1072 [sender.py:send_request():405] send_request: defer
+ 2024-05-23 13:03:37,894 INFO SenderThread:1072 [sender.py:send_request_defer():609] handle sender defer: 2
+ 2024-05-23 13:03:37,894 INFO SenderThread:1072 [sender.py:transition_state():613] send defer: 3
+ 2024-05-23 13:03:37,894 DEBUG SenderThread:1072 [sender.py:send():378] send: stats
+ 2024-05-23 13:03:37,895 DEBUG HandlerThread:1072 [handler.py:handle_request():158] handle_request: defer
+ 2024-05-23 13:03:37,895 INFO HandlerThread:1072 [handler.py:handle_request_defer():184] handle defer: 3
+ 2024-05-23 13:03:37,895 DEBUG SenderThread:1072 [sender.py:send_request():405] send_request: defer
+ 2024-05-23 13:03:37,895 INFO SenderThread:1072 [sender.py:send_request_defer():609] handle sender defer: 3
+ 2024-05-23 13:03:37,896 INFO SenderThread:1072 [sender.py:transition_state():613] send defer: 4
+ 2024-05-23 13:03:37,896 DEBUG HandlerThread:1072 [handler.py:handle_request():158] handle_request: defer
+ 2024-05-23 13:03:37,896 INFO HandlerThread:1072 [handler.py:handle_request_defer():184] handle defer: 4
+ 2024-05-23 13:03:37,896 DEBUG SenderThread:1072 [sender.py:send_request():405] send_request: defer
+ 2024-05-23 13:03:37,896 INFO SenderThread:1072 [sender.py:send_request_defer():609] handle sender defer: 4
+ 2024-05-23 13:03:37,896 INFO SenderThread:1072 [sender.py:transition_state():613] send defer: 5
+ 2024-05-23 13:03:37,896 DEBUG HandlerThread:1072 [handler.py:handle_request():158] handle_request: defer
+ 2024-05-23 13:03:37,896 INFO HandlerThread:1072 [handler.py:handle_request_defer():184] handle defer: 5
+ 2024-05-23 13:03:37,896 DEBUG SenderThread:1072 [sender.py:send():378] send: summary
+ 2024-05-23 13:03:37,897 INFO SenderThread:1072 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end
+ 2024-05-23 13:03:37,897 DEBUG SenderThread:1072 [sender.py:send_request():405] send_request: defer
+ 2024-05-23 13:03:37,897 INFO SenderThread:1072 [sender.py:send_request_defer():609] handle sender defer: 5
+ 2024-05-23 13:03:37,897 INFO SenderThread:1072 [sender.py:transition_state():613] send defer: 6
+ 2024-05-23 13:03:37,897 DEBUG HandlerThread:1072 [handler.py:handle_request():158] handle_request: defer
+ 2024-05-23 13:03:37,897 INFO HandlerThread:1072 [handler.py:handle_request_defer():184] handle defer: 6
+ 2024-05-23 13:03:37,897 DEBUG SenderThread:1072 [sender.py:send_request():405] send_request: defer
+ 2024-05-23 13:03:37,897 INFO SenderThread:1072 [sender.py:send_request_defer():609] handle sender defer: 6
+ 2024-05-23 13:03:37,902 DEBUG HandlerThread:1072 [handler.py:handle_request():158] handle_request: status_report
+ 2024-05-23 13:03:37,997 INFO SenderThread:1072 [sender.py:transition_state():613] send defer: 7
+ 2024-05-23 13:03:37,997 DEBUG HandlerThread:1072 [handler.py:handle_request():158] handle_request: defer
+ 2024-05-23 13:03:37,997 INFO HandlerThread:1072 [handler.py:handle_request_defer():184] handle defer: 7
+ 2024-05-23 13:03:37,997 DEBUG SenderThread:1072 [sender.py:send_request():405] send_request: defer
+ 2024-05-23 13:03:37,998 INFO SenderThread:1072 [sender.py:send_request_defer():609] handle sender defer: 7
+ 2024-05-23 13:03:38,733 INFO Thread-12 :1072 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_130326-1oo0voi6/files/config.yaml
+ 2024-05-23 13:03:38,734 INFO Thread-12 :1072 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_130326-1oo0voi6/files/wandb-summary.json
+ 2024-05-23 13:03:38,887 DEBUG HandlerThread:1072 [handler.py:handle_request():158] handle_request: poll_exit
+ 2024-05-23 13:03:39,246 INFO SenderThread:1072 [sender.py:transition_state():613] send defer: 8
+ 2024-05-23 13:03:39,246 DEBUG SenderThread:1072 [sender.py:send_request():405] send_request: poll_exit
+ 2024-05-23 13:03:39,246 DEBUG HandlerThread:1072 [handler.py:handle_request():158] handle_request: defer
+ 2024-05-23 13:03:39,246 INFO HandlerThread:1072 [handler.py:handle_request_defer():184] handle defer: 8
+ 2024-05-23 13:03:39,246 DEBUG SenderThread:1072 [sender.py:send_request():405] send_request: defer
+ 2024-05-23 13:03:39,246 INFO SenderThread:1072 [sender.py:send_request_defer():609] handle sender defer: 8
+ 2024-05-23 13:03:39,246 INFO SenderThread:1072 [job_builder.py:build():432] Attempting to build job artifact
+ 2024-05-23 13:03:39,247 INFO SenderThread:1072 [job_builder.py:_get_source_type():576] no source found
+ 2024-05-23 13:03:39,247 INFO SenderThread:1072 [sender.py:transition_state():613] send defer: 9
+ 2024-05-23 13:03:39,247 DEBUG HandlerThread:1072 [handler.py:handle_request():158] handle_request: defer
+ 2024-05-23 13:03:39,247 INFO HandlerThread:1072 [handler.py:handle_request_defer():184] handle defer: 9
+ 2024-05-23 13:03:39,247 DEBUG SenderThread:1072 [sender.py:send_request():405] send_request: defer
+ 2024-05-23 13:03:39,247 INFO SenderThread:1072 [sender.py:send_request_defer():609] handle sender defer: 9
+ 2024-05-23 13:03:39,247 INFO SenderThread:1072 [dir_watcher.py:finish():358] shutting down directory watcher
+ 2024-05-23 13:03:39,735 INFO SenderThread:1072 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_130326-1oo0voi6/files/output.log
+ 2024-05-23 13:03:39,735 INFO SenderThread:1072 [dir_watcher.py:finish():388] scan: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_130326-1oo0voi6/files
+ 2024-05-23 13:03:39,735 INFO SenderThread:1072 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_130326-1oo0voi6/files/wandb-summary.json wandb-summary.json
+ 2024-05-23 13:03:39,735 INFO SenderThread:1072 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_130326-1oo0voi6/files/wandb-metadata.json wandb-metadata.json
+ 2024-05-23 13:03:39,736 INFO SenderThread:1072 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_130326-1oo0voi6/files/requirements.txt requirements.txt
+ 2024-05-23 13:03:39,738 INFO SenderThread:1072 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_130326-1oo0voi6/files/output.log output.log
+ 2024-05-23 13:03:39,740 INFO SenderThread:1072 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_130326-1oo0voi6/files/config.yaml config.yaml
+ 2024-05-23 13:03:39,742 INFO SenderThread:1072 [sender.py:transition_state():613] send defer: 10
+ 2024-05-23 13:03:39,743 DEBUG HandlerThread:1072 [handler.py:handle_request():158] handle_request: defer
+ 2024-05-23 13:03:39,743 INFO HandlerThread:1072 [handler.py:handle_request_defer():184] handle defer: 10
+ 2024-05-23 13:03:39,743 DEBUG SenderThread:1072 [sender.py:send_request():405] send_request: defer
+ 2024-05-23 13:03:39,745 INFO SenderThread:1072 [sender.py:send_request_defer():609] handle sender defer: 10
+ 2024-05-23 13:03:39,745 INFO SenderThread:1072 [file_pusher.py:finish():169] shutting down file pusher
+ 2024-05-23 13:03:39,888 DEBUG HandlerThread:1072 [handler.py:handle_request():158] handle_request: poll_exit
+ 2024-05-23 13:03:39,888 DEBUG SenderThread:1072 [sender.py:send_request():405] send_request: poll_exit
+ 2024-05-23 13:03:40,056 INFO wandb-upload_0:1072 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_130326-1oo0voi6/files/wandb-summary.json
+ 2024-05-23 13:03:40,347 INFO wandb-upload_2:1072 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_130326-1oo0voi6/files/output.log
+ 2024-05-23 13:03:40,355 INFO wandb-upload_1:1072 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_130326-1oo0voi6/files/requirements.txt
+ 2024-05-23 13:03:40,407 INFO wandb-upload_3:1072 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_130326-1oo0voi6/files/config.yaml
+ 2024-05-23 13:03:40,607 INFO Thread-11 (_thread_body):1072 [sender.py:transition_state():613] send defer: 11
+ 2024-05-23 13:03:40,608 DEBUG HandlerThread:1072 [handler.py:handle_request():158] handle_request: defer
+ 2024-05-23 13:03:40,608 INFO HandlerThread:1072 [handler.py:handle_request_defer():184] handle defer: 11
+ 2024-05-23 13:03:40,608 DEBUG SenderThread:1072 [sender.py:send_request():405] send_request: defer
+ 2024-05-23 13:03:40,608 INFO SenderThread:1072 [sender.py:send_request_defer():609] handle sender defer: 11
+ 2024-05-23 13:03:40,608 INFO SenderThread:1072 [file_pusher.py:join():175] waiting for file pusher
+ 2024-05-23 13:03:40,608 INFO SenderThread:1072 [sender.py:transition_state():613] send defer: 12
+ 2024-05-23 13:03:40,608 DEBUG HandlerThread:1072 [handler.py:handle_request():158] handle_request: defer
+ 2024-05-23 13:03:40,608 INFO HandlerThread:1072 [handler.py:handle_request_defer():184] handle defer: 12
+ 2024-05-23 13:03:40,609 DEBUG SenderThread:1072 [sender.py:send_request():405] send_request: defer
+ 2024-05-23 13:03:40,609 INFO SenderThread:1072 [sender.py:send_request_defer():609] handle sender defer: 12
+ 2024-05-23 13:03:40,609 INFO SenderThread:1072 [file_stream.py:finish():601] file stream finish called
+ 2024-05-23 13:03:40,670 INFO SenderThread:1072 [file_stream.py:finish():605] file stream finish is done
+ 2024-05-23 13:03:40,670 INFO SenderThread:1072 [sender.py:transition_state():613] send defer: 13
+ 2024-05-23 13:03:40,670 DEBUG HandlerThread:1072 [handler.py:handle_request():158] handle_request: defer
+ 2024-05-23 13:03:40,670 INFO HandlerThread:1072 [handler.py:handle_request_defer():184] handle defer: 13
+ 2024-05-23 13:03:40,671 DEBUG SenderThread:1072 [sender.py:send_request():405] send_request: defer
+ 2024-05-23 13:03:40,671 INFO SenderThread:1072 [sender.py:send_request_defer():609] handle sender defer: 13
+ 2024-05-23 13:03:40,671 INFO SenderThread:1072 [sender.py:transition_state():613] send defer: 14
+ 2024-05-23 13:03:40,671 DEBUG HandlerThread:1072 [handler.py:handle_request():158] handle_request: defer
+ 2024-05-23 13:03:40,671 INFO HandlerThread:1072 [handler.py:handle_request_defer():184] handle defer: 14
+ 2024-05-23 13:03:40,671 DEBUG SenderThread:1072 [sender.py:send():378] send: final
+ 2024-05-23 13:03:40,671 DEBUG SenderThread:1072 [sender.py:send():378] send: footer
+ 2024-05-23 13:03:40,671 DEBUG SenderThread:1072 [sender.py:send_request():405] send_request: defer
+ 2024-05-23 13:03:40,671 INFO SenderThread:1072 [sender.py:send_request_defer():609] handle sender defer: 14
+ 2024-05-23 13:03:40,672 DEBUG HandlerThread:1072 [handler.py:handle_request():158] handle_request: poll_exit
+ 2024-05-23 13:03:40,672 DEBUG SenderThread:1072 [sender.py:send_request():405] send_request: poll_exit
+ 2024-05-23 13:03:40,672 DEBUG HandlerThread:1072 [handler.py:handle_request():158] handle_request: poll_exit
+ 2024-05-23 13:03:40,672 DEBUG HandlerThread:1072 [handler.py:handle_request():158] handle_request: server_info
+ 2024-05-23 13:03:40,672 DEBUG HandlerThread:1072 [handler.py:handle_request():158] handle_request: get_summary
+ 2024-05-23 13:03:40,672 DEBUG HandlerThread:1072 [handler.py:handle_request():158] handle_request: sampled_history
+ 2024-05-23 13:03:40,673 DEBUG HandlerThread:1072 [handler.py:handle_request():158] handle_request: internal_messages
+ 2024-05-23 13:03:40,673 DEBUG SenderThread:1072 [sender.py:send_request():405] send_request: poll_exit
+ 2024-05-23 13:03:40,673 DEBUG SenderThread:1072 [sender.py:send_request():405] send_request: server_info
+ 2024-05-23 13:03:40,727 INFO MainThread:1072 [wandb_run.py:_footer_history_summary_info():3994] rendering history
+ 2024-05-23 13:03:40,727 INFO MainThread:1072 [wandb_run.py:_footer_history_summary_info():4026] rendering summary
+ 2024-05-23 13:03:40,727 INFO MainThread:1072 [wandb_run.py:_footer_sync_info():3953] logging synced files
+ 2024-05-23 13:03:40,727 DEBUG HandlerThread:1072 [handler.py:handle_request():158] handle_request: shutdown
+ 2024-05-23 13:03:40,728 INFO HandlerThread:1072 [handler.py:finish():882] shutting down handler
+ 2024-05-23 13:03:41,673 INFO WriterThread:1072 [datastore.py:close():296] close: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_130326-1oo0voi6/run-1oo0voi6.wandb
+ 2024-05-23 13:03:41,727 INFO SenderThread:1072 [sender.py:finish():1545] shutting down sender
+ 2024-05-23 13:03:41,727 INFO SenderThread:1072 [file_pusher.py:finish():169] shutting down file pusher
+ 2024-05-23 13:03:41,727 INFO SenderThread:1072 [file_pusher.py:join():175] waiting for file pusher
lm-evaluation-harness/wandb/run-20240523_130326-1oo0voi6/logs/debug.log ADDED
@@ -0,0 +1,29 @@
+ 2024-05-23 13:03:26,415 INFO MainThread:917 [wandb_setup.py:_flush():76] Current SDK version is 0.17.0
+ 2024-05-23 13:03:26,415 INFO MainThread:917 [wandb_setup.py:_flush():76] Configure stats pid to 917
+ 2024-05-23 13:03:26,415 INFO MainThread:917 [wandb_setup.py:_flush():76] Loading settings from /root/.config/wandb/settings
+ 2024-05-23 13:03:26,415 INFO MainThread:917 [wandb_setup.py:_flush():76] Loading settings from /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/settings
+ 2024-05-23 13:03:26,415 INFO MainThread:917 [wandb_setup.py:_flush():76] Loading settings from environment variables: {}
+ 2024-05-23 13:03:26,415 INFO MainThread:917 [wandb_setup.py:_flush():76] Applying setup settings: {'_disable_service': False}
+ 2024-05-23 13:03:26,415 WARNING MainThread:917 [wandb_setup.py:_flush():76] Could not find program at -m lm_eval.__main__
+ 2024-05-23 13:03:26,415 INFO MainThread:917 [wandb_setup.py:_flush():76] Inferring run settings from compute environment: {'program_relpath': None, 'program': '-m lm_eval.__main__'}
+ 2024-05-23 13:03:26,416 INFO MainThread:917 [wandb_setup.py:_flush():76] Applying login settings: {}
+ 2024-05-23 13:03:26,416 INFO MainThread:917 [wandb_init.py:_log_setup():520] Logging user logs to /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_130326-1oo0voi6/logs/debug.log
+ 2024-05-23 13:03:26,416 INFO MainThread:917 [wandb_init.py:_log_setup():521] Logging internal logs to /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_130326-1oo0voi6/logs/debug-internal.log
+ 2024-05-23 13:03:26,416 INFO MainThread:917 [wandb_init.py:init():560] calling init triggers
+ 2024-05-23 13:03:26,416 INFO MainThread:917 [wandb_init.py:init():567] wandb.init called with sweep_config: {}
+ config: {}
+ 2024-05-23 13:03:26,416 INFO MainThread:917 [wandb_init.py:init():610] starting backend
+ 2024-05-23 13:03:26,416 INFO MainThread:917 [wandb_init.py:init():614] setting up manager
+ 2024-05-23 13:03:26,419 INFO MainThread:917 [backend.py:_multiprocessing_setup():105] multiprocessing start_methods=fork,spawn,forkserver, using: spawn
+ 2024-05-23 13:03:26,420 INFO MainThread:917 [wandb_init.py:init():622] backend started and connected
+ 2024-05-23 13:03:26,424 INFO MainThread:917 [wandb_init.py:init():711] updated telemetry
+ 2024-05-23 13:03:26,432 INFO MainThread:917 [wandb_init.py:init():744] communicating run to backend with 90.0 second timeout
+ 2024-05-23 13:03:26,726 INFO MainThread:917 [wandb_run.py:_on_init():2396] communicating current version
+ 2024-05-23 13:03:26,843 INFO MainThread:917 [wandb_run.py:_on_init():2405] got version response
+ 2024-05-23 13:03:26,844 INFO MainThread:917 [wandb_init.py:init():795] starting run threads in backend
+ 2024-05-23 13:03:27,126 INFO MainThread:917 [wandb_run.py:_console_start():2374] atexit reg
+ 2024-05-23 13:03:27,126 INFO MainThread:917 [wandb_run.py:_redirect():2229] redirect: wrap_raw
+ 2024-05-23 13:03:27,126 INFO MainThread:917 [wandb_run.py:_redirect():2294] Wrapping output streams.
+ 2024-05-23 13:03:27,127 INFO MainThread:917 [wandb_run.py:_redirect():2319] Redirects installed.
+ 2024-05-23 13:03:27,129 INFO MainThread:917 [wandb_init.py:init():838] run started, returning control to user process
+ 2024-05-23 13:03:41,728 WARNING MsgRouterThr:917 [router.py:message_loop():77] message_loop has been closed
lm-evaluation-harness/wandb/run-20240523_130326-1oo0voi6/run-1oo0voi6.wandb ADDED
Binary file (11.1 kB).
lm-evaluation-harness/wandb/run-20240523_130448-vm5e7ag8/files/config.yaml ADDED
@@ -0,0 +1,43 @@
+ wandb_version: 1
+
+ _wandb:
+   desc: null
+   value:
+     python_version: 3.10.12
+     cli_version: 0.17.0
+     framework: huggingface
+     huggingface_version: 4.41.1
+     is_jupyter_run: false
+     is_kaggle_kernel: false
+     start_time: 1716469488
+     t:
+       1:
+       - 1
+       - 5
+       - 11
+       - 49
+       - 51
+       - 53
+       - 55
+       - 71
+       - 98
+       - 100
+       2:
+       - 1
+       - 5
+       - 11
+       - 49
+       - 51
+       - 53
+       - 55
+       - 71
+       - 98
+       - 100
+       3:
+       - 23
+       4: 3.10.12
+       5: 0.17.0
+       6: 4.41.1
+       8:
+       - 5
+       13: linux-x86_64
lm-evaluation-harness/wandb/run-20240523_130448-vm5e7ag8/files/output.log ADDED
@@ -0,0 +1,34 @@
+
+ 2024-05-23:13:04:48,983 INFO [__main__.py:251] Verbosity set to INFO
+ 2024-05-23:13:04:57,475 INFO [__main__.py:335] Selected Tasks: ['arc_easy', 'hellaswag', 'mrpc', 'openbookqa', 'sst2', 'winogrande']
+ 2024-05-23:13:04:57,476 INFO [evaluator.py:131] Setting random seed to 0 | Setting numpy seed to 1234 | Setting torch manual seed to 1234
+ 2024-05-23:13:04:57,477 INFO [evaluator.py:177] Initializing hf model, with arguments: {'pretrained': '/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step14000'}
+ 2024-05-23:13:04:59,760 INFO [huggingface.py:164] Using device 'cuda'
+ Traceback (most recent call last):
+   File "/usr/lib/python3.10/runpy.py", line 196, in _run_module_as_main
+     return _run_code(code, main_globals, None,
+   File "/usr/lib/python3.10/runpy.py", line 86, in _run_code
+     exec(code, run_globals)
+   File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 417, in <module>
+     cli_evaluate()
+   File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 341, in cli_evaluate
+     results = evaluator.simple_evaluate(
+   File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/utils.py", line 288, in _wrapper
+     return fn(*args, **kwargs)
+   File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/evaluator.py", line 180, in simple_evaluate
+     lm = lm_eval.api.registry.get_model(model).create_from_arg_string(
+   File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/api/model.py", line 134, in create_from_arg_string
+     return cls(**args, **args2)
+   File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 190, in __init__
+     self._get_config(
+   File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 471, in _get_config
+     self._config = transformers.AutoConfig.from_pretrained(
+   File "/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py", line 934, in from_pretrained
+     config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs)
+   File "/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 632, in get_config_dict
+     config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs)
+   File "/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 689, in _get_config_dict
+     resolved_config_file = cached_file(
+   File "/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py", line 370, in cached_file
+     raise EnvironmentError(
+ OSError: /mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step14000 does not appear to have a file named config.json. Checkout 'https://huggingface.co//mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step14000/tree/main' for available files.
lm-evaluation-harness/wandb/run-20240523_130448-vm5e7ag8/files/requirements.txt ADDED
@@ -0,0 +1,155 @@
+ DataProperty==1.0.1
+ GitPython==3.1.43
+ Jinja2==3.1.4
+ Markdown==3.6
+ MarkupSafe==2.1.5
+ Pillow-SIMD==7.0.0.post3
+ PyYAML==6.0
+ Werkzeug==3.0.3
+ absl-py==2.1.0
+ accelerate==0.30.1
+ aiohttp==3.9.5
+ aiosignal==1.3.1
+ async-timeout==4.0.3
+ attrs==23.2.0
+ av==9.2.0
+ cachetools==5.3.3
+ certifi==2024.2.2
+ cffi==1.15.1
+ cfgv==3.4.0
+ chardet==5.2.0
+ charset-normalizer==3.3.2
+ click==8.1.7
+ cmake==3.29.2
+ colorama==0.4.6
+ datasets==2.19.1
+ deepspeed==0.12.4+hpu.synapse.v1.15.1
+ dill==0.3.8
+ distlib==0.3.8
+ docker-pycreds==0.4.0
+ einops==0.8.0
+ evaluate==0.4.2
+ exceptiongroup==1.2.1
+ expecttest==0.2.1
+ filelock==3.14.0
+ frozenlist==1.4.1
+ fsspec==2024.3.1
+ gitdb==4.0.11
+ google-auth-oauthlib==0.4.6
+ google-auth==2.29.0
+ grpcio==1.63.0
+ habana-media-loader==1.15.1.15
+ habana-pyhlml==1.15.1.15
+ habana-torch-dataloader==1.15.1.15
+ habana-torch-plugin==1.15.1.15
+ habana_gpu_migration==1.15.1.15
+ habana_quantization_toolkit==1.15.1.15
+ hjson==3.1.0
+ huggingface-hub==0.23.1
+ identify==2.5.36
+ idna==3.7
+ iniconfig==2.0.0
+ joblib==1.4.2
+ jsonlines==4.0.0
+ lightning-habana==1.4.0
+ lightning-utilities==0.11.2
+ lightning==2.2.0.post0
+ lm_eval==0.4.2
+ lm_eval==0.4.2
+ lm_eval==0.4.2
+ lxml==5.2.2
+ mbstrdecoder==1.1.3
+ more-itertools==10.2.0
+ mpi4py==3.1.4
+ mpmath==1.3.0
+ multidict==6.0.5
+ multiprocess==0.70.16
+ networkx==3.3
+ ninja==1.11.1.1
+ nltk==3.8.1
+ nodeenv==1.8.0
+ numexpr==2.10.0
+ numpy==1.23.5
+ oauthlib==3.2.2
+ packaging==24.0
+ pandas==2.0.1
+ pathspec==0.12.1
+ pathvalidate==3.2.0
+ peft==0.11.1
+ perfetto==0.7.0
+ pillow==10.3.0
+ pip==22.0.2
+ pip==23.3.1
+ platformdirs==4.2.1
+ pluggy==1.5.0
+ portalocker==2.8.2
+ pre-commit==3.3.3
+ pretty-errors==1.2.25
+ protobuf==3.20.3
+ psutil==5.9.8
+ py-cpuinfo==9.0.0
+ pyarrow-hotfix==0.6
+ pyarrow==16.1.0
+ pyasn1==0.6.0
+ pyasn1_modules==0.4.0
+ pybind11==2.10.4
+ pycparser==2.22
+ pydantic==1.10.13
+ pynvml==8.0.4
+ pytablewriter==1.2.0
+ pytest==8.2.0
+ python-dateutil==2.9.0.post0
+ pytorch-lightning==2.2.4
+ pytz==2024.1
+ regex==2023.5.5
+ requests-oauthlib==2.0.0
+ requests==2.31.0
+ rouge_score==0.1.2
+ rsa==4.9
+ sacrebleu==2.4.2
+ safetensors==0.4.3
+ scikit-learn==1.5.0
+ scipy==1.13.1
+ sentencepiece==0.2.0
+ sentry-sdk==2.3.0
+ setproctitle==1.3.3
+ setuptools==59.6.0
+ setuptools==69.5.1
+ six==1.16.0
+ smmap==5.0.1
+ sqlitedict==2.1.0
+ symengine==0.11.0
+ sympy==1.12
+ tabledata==1.3.3
+ tabulate==0.9.0
+ tcolorpy==0.1.6
+ tdqm==0.0.1
+ tensorboard-data-server==0.6.1
+ tensorboard-plugin-wit==1.8.1
+ tensorboard==2.11.2
+ threadpoolctl==3.5.0
+ tokenizers==0.19.1
+ tomli==2.0.1
+ torch==2.2.0a0+git8964477
+ torch_tb_profiler==0.4.0
+ torchaudio==2.2.0+08901ad
+ torchdata==0.7.1+5e6f7b7
+ torchmetrics==1.4.0
+ torchtext==0.17.0+400da5c
+ torchvision==0.17.0+b2383d4
+ tqdm-multiprocess==0.0.11
+ tqdm==4.66.4
+ transformers==4.41.1
+ typepy==1.3.2
+ typing_extensions==4.11.0
+ tzdata==2024.1
+ urllib3==1.26.18
+ virtualenv==20.26.1
+ wandb==0.17.0
+ wheel==0.37.1
+ wheel==0.43.0
+ word2number==1.1
+ xxhash==3.4.1
+ yamllint==1.35.1
+ yarl==1.9.4
+ zstandard==0.22.0
lm-evaluation-harness/wandb/run-20240523_130448-vm5e7ag8/files/wandb-metadata.json ADDED
@@ -0,0 +1,850 @@
+ {
+ "os": "Linux-5.15.0-92-generic-x86_64-with-glibc2.35",
+ "python": "3.10.12",
+ "heartbeatAt": "2024-05-23T13:04:48.781263",
+ "startedAt": "2024-05-23T13:04:48.261491",
+ "docker": null,
+ "cuda": null,
+ "args": [
+ "--model",
+ "hf",
+ "--model_args",
+ "pretrained=/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step14000",
+ "--tasks",
+ "hellaswag,arc_easy,openbookqa,winogrande,sst2,mrpc",
+ "--batch_size",
+ "auto",
+ "--wandb_args",
+ "project=bharatgpt,group=trial_expt_2"
+ ],
+ "state": "running",
+ "program": "-m lm_eval.__main__",
+ "codePathLocal": null,
+ "git": {
+ "remote": "https://github.com/EleutherAI/lm-evaluation-harness",
+ "commit": null
+ },
+ "email": null,
+ "root": "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness",
+ "host": "peacock-evaluation-worker-0",
+ "username": "root",
+ "executable": "/usr/bin/python3",
+ "cpu_count": 80,
+ "cpu_count_logical": 160,
+ "cpu_freq": {
+ "current": 2365.89415,
+ "min": 800.0,
+ "max": 3400.0
+ },
+ "cpu_freq_per_core": [
+ {"current": 3400.0, "min": 800.0, "max": 3400.0},
+ {"current": 3400.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 3400.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
+ {"current": 2300.0, "min": 800.0, "max": 3400.0},
655
+ {
656
+ "current": 2300.0,
657
+ "min": 800.0,
658
+ "max": 3400.0
659
+ },
660
+ {
661
+ "current": 2300.0,
662
+ "min": 800.0,
663
+ "max": 3400.0
664
+ },
665
+ {
666
+ "current": 2300.0,
667
+ "min": 800.0,
668
+ "max": 3400.0
669
+ },
670
+ {
671
+ "current": 2300.0,
672
+ "min": 800.0,
673
+ "max": 3400.0
674
+ },
675
+ {
676
+ "current": 2300.0,
677
+ "min": 800.0,
678
+ "max": 3400.0
679
+ },
680
+ {
681
+ "current": 2300.0,
682
+ "min": 800.0,
683
+ "max": 3400.0
684
+ },
685
+ {
686
+ "current": 2300.0,
687
+ "min": 800.0,
688
+ "max": 3400.0
689
+ },
690
+ {
691
+ "current": 2300.0,
692
+ "min": 800.0,
693
+ "max": 3400.0
694
+ },
695
+ {
696
+ "current": 2300.0,
697
+ "min": 800.0,
698
+ "max": 3400.0
699
+ },
700
+ {
701
+ "current": 2300.0,
702
+ "min": 800.0,
703
+ "max": 3400.0
704
+ },
705
+ {
706
+ "current": 2300.0,
707
+ "min": 800.0,
708
+ "max": 3400.0
709
+ },
710
+ {
711
+ "current": 2300.0,
712
+ "min": 800.0,
713
+ "max": 3400.0
714
+ },
715
+ {
716
+ "current": 2300.0,
717
+ "min": 800.0,
718
+ "max": 3400.0
719
+ },
720
+ {
721
+ "current": 2300.0,
722
+ "min": 800.0,
723
+ "max": 3400.0
724
+ },
725
+ {
726
+ "current": 2300.0,
727
+ "min": 800.0,
728
+ "max": 3400.0
729
+ },
730
+ {
731
+ "current": 2300.0,
732
+ "min": 800.0,
733
+ "max": 3400.0
734
+ },
735
+ {
736
+ "current": 2300.0,
737
+ "min": 800.0,
738
+ "max": 3400.0
739
+ },
740
+ {
741
+ "current": 2300.0,
742
+ "min": 800.0,
743
+ "max": 3400.0
744
+ },
745
+ {
746
+ "current": 2300.0,
747
+ "min": 800.0,
748
+ "max": 3400.0
749
+ },
750
+ {
751
+ "current": 2300.0,
752
+ "min": 800.0,
753
+ "max": 3400.0
754
+ },
755
+ {
756
+ "current": 2300.0,
757
+ "min": 800.0,
758
+ "max": 3400.0
759
+ },
760
+ {
761
+ "current": 2300.0,
762
+ "min": 800.0,
763
+ "max": 3400.0
764
+ },
765
+ {
766
+ "current": 2300.0,
767
+ "min": 800.0,
768
+ "max": 3400.0
769
+ },
770
+ {
771
+ "current": 2300.0,
772
+ "min": 800.0,
773
+ "max": 3400.0
774
+ },
775
+ {
776
+ "current": 2300.0,
777
+ "min": 800.0,
778
+ "max": 3400.0
779
+ },
780
+ {
781
+ "current": 2300.0,
782
+ "min": 800.0,
783
+ "max": 3400.0
784
+ },
785
+ {
786
+ "current": 2300.0,
787
+ "min": 800.0,
788
+ "max": 3400.0
789
+ },
790
+ {
791
+ "current": 2300.0,
792
+ "min": 800.0,
793
+ "max": 3400.0
794
+ },
795
+ {
796
+ "current": 2300.0,
797
+ "min": 800.0,
798
+ "max": 3400.0
799
+ },
800
+ {
801
+ "current": 2300.0,
802
+ "min": 800.0,
803
+ "max": 3400.0
804
+ },
805
+ {
806
+ "current": 2300.0,
807
+ "min": 800.0,
808
+ "max": 3400.0
809
+ },
810
+ {
811
+ "current": 2300.0,
812
+ "min": 800.0,
813
+ "max": 3400.0
814
+ },
815
+ {
816
+ "current": 2300.0,
817
+ "min": 800.0,
818
+ "max": 3400.0
819
+ },
820
+ {
821
+ "current": 2300.0,
822
+ "min": 800.0,
823
+ "max": 3400.0
824
+ },
825
+ {
826
+ "current": 2300.0,
827
+ "min": 800.0,
828
+ "max": 3400.0
829
+ },
830
+ {
831
+ "current": 2300.0,
832
+ "min": 800.0,
833
+ "max": 3400.0
834
+ },
835
+ {
836
+ "current": 2300.0,
837
+ "min": 800.0,
838
+ "max": 3400.0
839
+ }
840
+ ],
841
+ "disk": {
842
+ "/": {
843
+ "total": 877.6341285705566,
844
+ "used": 211.62759399414062
845
+ }
846
+ },
847
+ "memory": {
848
+ "total": 1007.4379539489746
849
+ }
850
+ }
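The tail of this metadata file records per-core CPU frequencies plus disk and memory totals (in GiB). As a rough sketch of how such a report can be reproduced, not necessarily wandb's exact implementation, `psutil` exposes all three probes; the field names below mirror this file:

```python
import json

import psutil  # assumed installed; wandb's system probe reports psutil-style fields

# Build a dict with the same shape as the wandb-metadata.json above:
# per-core frequency entries plus disk and memory totals converted to GiB.
metadata = {
    "cpu_freq_per_core": [
        {"current": f.current, "min": f.min, "max": f.max}
        for f in psutil.cpu_freq(percpu=True)
    ],
    "disk": {
        "/": {
            "total": psutil.disk_usage("/").total / 2**30,
            "used": psutil.disk_usage("/").used / 2**30,
        }
    },
    "memory": {"total": psutil.virtual_memory().total / 2**30},
}
print(json.dumps(metadata, indent=2))
```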
lm-evaluation-harness/wandb/run-20240523_130448-vm5e7ag8/files/wandb-summary.json ADDED
@@ -0,0 +1 @@
+ {"_wandb": {"runtime": 11}}
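The summary for this 11-second run holds a single `_wandb` entry. A minimal sketch of reading it back (path taken from this diff):

```python
import json

# Hypothetical consumer of the run summary shown above.
with open(
    "lm-evaluation-harness/wandb/run-20240523_130448-vm5e7ag8/files/wandb-summary.json"
) as f:
    summary = json.load(f)

print(summary["_wandb"]["runtime"])  # -> 11 (seconds)
```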
lm-evaluation-harness/wandb/run-20240523_130448-vm5e7ag8/logs/debug-internal.log ADDED
@@ -0,0 +1,183 @@
+ 2024-05-23 13:04:48,284 INFO StreamThr :1586 [internal.py:wandb_internal():85] W&B internal server running at pid: 1586, started at: 2024-05-23 13:04:48.281109
+ 2024-05-23 13:04:48,288 DEBUG HandlerThread:1586 [handler.py:handle_request():158] handle_request: status
+ 2024-05-23 13:04:48,288 INFO WriterThread:1586 [datastore.py:open_for_write():87] open: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_130448-vm5e7ag8/run-vm5e7ag8.wandb
+ 2024-05-23 13:04:48,293 DEBUG SenderThread:1586 [sender.py:send():378] send: header
+ 2024-05-23 13:04:48,294 DEBUG SenderThread:1586 [sender.py:send():378] send: run
+ 2024-05-23 13:04:48,541 INFO SenderThread:1586 [dir_watcher.py:__init__():211] watching files in: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_130448-vm5e7ag8/files
+ 2024-05-23 13:04:48,541 INFO SenderThread:1586 [sender.py:_start_run_threads():1123] run started: vm5e7ag8 with start time 1716469488.280964
+ 2024-05-23 13:04:48,542 DEBUG HandlerThread:1586 [handler.py:handle_request():158] handle_request: check_version
+ 2024-05-23 13:04:48,542 DEBUG SenderThread:1586 [sender.py:send_request():405] send_request: check_version
+ 2024-05-23 13:04:48,704 DEBUG HandlerThread:1586 [handler.py:handle_request():158] handle_request: run_start
+ 2024-05-23 13:04:48,706 DEBUG HandlerThread:1586 [system_info.py:__init__():26] System info init
+ 2024-05-23 13:04:48,706 DEBUG HandlerThread:1586 [system_info.py:__init__():41] System info init done
+ 2024-05-23 13:04:48,706 INFO HandlerThread:1586 [system_monitor.py:start():194] Starting system monitor
+ 2024-05-23 13:04:48,706 INFO SystemMonitor:1586 [system_monitor.py:_start():158] Starting system asset monitoring threads
+ 2024-05-23 13:04:48,706 INFO HandlerThread:1586 [system_monitor.py:probe():214] Collecting system info
+ 2024-05-23 13:04:48,713 INFO SystemMonitor:1586 [interfaces.py:start():188] Started cpu monitoring
+ 2024-05-23 13:04:48,719 INFO SystemMonitor:1586 [interfaces.py:start():188] Started disk monitoring
+ 2024-05-23 13:04:48,719 INFO SystemMonitor:1586 [interfaces.py:start():188] Started memory monitoring
+ 2024-05-23 13:04:48,719 INFO SystemMonitor:1586 [interfaces.py:start():188] Started network monitoring
+ 2024-05-23 13:04:48,781 DEBUG HandlerThread:1586 [system_info.py:probe():150] Probing system
+ 2024-05-23 13:04:48,784 DEBUG HandlerThread:1586 [system_info.py:_probe_git():135] Probing git
+ 2024-05-23 13:04:48,794 ERROR HandlerThread:1586 [gitlib.py:root():92] git root error: Cmd('git') failed due to: exit code(128)
+ cmdline: git rev-parse --show-toplevel
+ stderr: 'fatal: detected dubious ownership in repository at '/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness'
+ To add an exception for this directory, call:
+
+ git config --global --add safe.directory /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness'
+ 2024-05-23 13:04:48,794 DEBUG HandlerThread:1586 [system_info.py:_probe_git():143] Probing git done
+ 2024-05-23 13:04:48,794 DEBUG HandlerThread:1586 [system_info.py:probe():198] Probing system done
+ 2024-05-23 13:04:48,794 DEBUG HandlerThread:1586 [system_monitor.py:probe():223] {'os': 'Linux-5.15.0-92-generic-x86_64-with-glibc2.35', 'python': '3.10.12', 'heartbeatAt': '2024-05-23T13:04:48.781263', 'startedAt': '2024-05-23T13:04:48.261491', 'docker': None, 'cuda': None, 'args': ('--model', 'hf', '--model_args', 'pretrained=/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step14000', '--tasks', 'hellaswag,arc_easy,openbookqa,winogrande,sst2,mrpc', '--batch_size', 'auto', '--wandb_args', 'project=bharatgpt,group=trial_expt_2'), 'state': 'running', 'program': '-m lm_eval.__main__', 'codePathLocal': None, 'git': {'remote': 'https://github.com/EleutherAI/lm-evaluation-harness', 'commit': None}, 'email': None, 'root': '/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness', 'host': 'peacock-evaluation-worker-0', 'username': 'root', 'executable': '/usr/bin/python3', 'cpu_count': 80, 'cpu_count_logical': 160, 'cpu_freq': {'current': 2365.89415, 'min': 800.0, 'max': 3400.0}, 'cpu_freq_per_core': [{'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, … (157 further per-core entries omitted: every core reports 'min': 800.0 and 'max': 3400.0, almost all at 'current': 2300.0) …], 'disk': {'/': {'total': 877.6341285705566, 'used': 211.62759399414062}}, 'memory': {'total': 1007.4379539489746}}
+ 2024-05-23 13:04:48,794 INFO HandlerThread:1586 [system_monitor.py:probe():224] Finished collecting system info
+ 2024-05-23 13:04:48,794 INFO HandlerThread:1586 [system_monitor.py:probe():227] Publishing system info
+ 2024-05-23 13:04:48,797 INFO HandlerThread:1586 [system_monitor.py:probe():229] Finished publishing system info
+ 2024-05-23 13:04:48,802 DEBUG SenderThread:1586 [sender.py:send():378] send: files
+ 2024-05-23 13:04:48,803 INFO SenderThread:1586 [sender.py:_save_file():1389] saving file wandb-metadata.json with policy now
+ 2024-05-23 13:04:48,976 DEBUG HandlerThread:1586 [handler.py:handle_request():158] handle_request: python_packages
+ 2024-05-23 13:04:48,976 DEBUG SenderThread:1586 [sender.py:send_request():405] send_request: python_packages
+ 2024-05-23 13:04:48,977 DEBUG HandlerThread:1586 [handler.py:handle_request():158] handle_request: stop_status
+ 2024-05-23 13:04:48,978 DEBUG SenderThread:1586 [sender.py:send_request():405] send_request: stop_status
+ 2024-05-23 13:04:49,135 DEBUG SenderThread:1586 [sender.py:send():378] send: telemetry
+ 2024-05-23 13:04:49,355 INFO wandb-upload_0:1586 [upload_job.py:push():130] Uploaded file /tmp/tmpj2js6yoqwandb/bh6r6bog-wandb-metadata.json
+ 2024-05-23 13:04:49,543 INFO Thread-12 :1586 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_130448-vm5e7ag8/files/requirements.txt
+ 2024-05-23 13:04:49,543 INFO Thread-12 :1586 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_130448-vm5e7ag8/files/output.log
+ 2024-05-23 13:04:49,543 INFO Thread-12 :1586 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_130448-vm5e7ag8/files/wandb-metadata.json
+ 2024-05-23 13:04:51,542 INFO Thread-12 :1586 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_130448-vm5e7ag8/files/output.log
+ 2024-05-23 13:04:54,140 DEBUG HandlerThread:1586 [handler.py:handle_request():158] handle_request: status_report
+ 2024-05-23 13:04:59,478 DEBUG HandlerThread:1586 [handler.py:handle_request():158] handle_request: status_report
+ 2024-05-23 13:04:59,551 INFO Thread-12 :1586 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_130448-vm5e7ag8/files/output.log
+ 2024-05-23 13:04:59,771 DEBUG SenderThread:1586 [sender.py:send():378] send: exit
+ 2024-05-23 13:04:59,771 INFO SenderThread:1586 [sender.py:send_exit():585] handling exit code: 1
+ 2024-05-23 13:04:59,772 INFO SenderThread:1586 [sender.py:send_exit():587] handling runtime: 11
+ 2024-05-23 13:04:59,773 INFO SenderThread:1586 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end
+ 2024-05-23 13:04:59,773 INFO SenderThread:1586 [sender.py:send_exit():593] send defer
+ 2024-05-23 13:04:59,773 DEBUG HandlerThread:1586 [handler.py:handle_request():158] handle_request: defer
+ 2024-05-23 13:04:59,774 INFO HandlerThread:1586 [handler.py:handle_request_defer():184] handle defer: 0
+ 2024-05-23 13:04:59,774 DEBUG SenderThread:1586 [sender.py:send_request():405] send_request: defer
+ 2024-05-23 13:04:59,774 INFO SenderThread:1586 [sender.py:send_request_defer():609] handle sender defer: 0
+ 2024-05-23 13:04:59,774 INFO SenderThread:1586 [sender.py:transition_state():613] send defer: 1
+ 2024-05-23 13:04:59,774 DEBUG HandlerThread:1586 [handler.py:handle_request():158] handle_request: defer
+ 2024-05-23 13:04:59,774 INFO HandlerThread:1586 [handler.py:handle_request_defer():184] handle defer: 1
+ 2024-05-23 13:04:59,774 DEBUG SenderThread:1586 [sender.py:send_request():405] send_request: defer
+ 2024-05-23 13:04:59,774 INFO SenderThread:1586 [sender.py:send_request_defer():609] handle sender defer: 1
+ 2024-05-23 13:04:59,774 INFO SenderThread:1586 [sender.py:transition_state():613] send defer: 2
+ 2024-05-23 13:04:59,774 DEBUG HandlerThread:1586 [handler.py:handle_request():158] handle_request: defer
+ 2024-05-23 13:04:59,774 INFO HandlerThread:1586 [handler.py:handle_request_defer():184] handle defer: 2
+ 2024-05-23 13:04:59,774 INFO HandlerThread:1586 [system_monitor.py:finish():203] Stopping system monitor
+ 2024-05-23 13:04:59,775 DEBUG SystemMonitor:1586 [system_monitor.py:_start():172] Starting system metrics aggregation loop
+ 2024-05-23 13:04:59,775 DEBUG SystemMonitor:1586 [system_monitor.py:_start():179] Finished system metrics aggregation loop
+ 2024-05-23 13:04:59,775 DEBUG SystemMonitor:1586 [system_monitor.py:_start():183] Publishing last batch of metrics
+ 2024-05-23 13:04:59,777 INFO HandlerThread:1586 [interfaces.py:finish():200] Joined cpu monitor
+ 2024-05-23 13:04:59,778 INFO HandlerThread:1586 [interfaces.py:finish():200] Joined disk monitor
+ 2024-05-23 13:04:59,778 INFO HandlerThread:1586 [interfaces.py:finish():200] Joined memory monitor
+ 2024-05-23 13:04:59,778 INFO HandlerThread:1586 [interfaces.py:finish():200] Joined network monitor
+ 2024-05-23 13:04:59,778 DEBUG SenderThread:1586 [sender.py:send_request():405] send_request: defer
+ 2024-05-23 13:04:59,778 INFO SenderThread:1586 [sender.py:send_request_defer():609] handle sender defer: 2
+ 2024-05-23 13:04:59,778 INFO SenderThread:1586 [sender.py:transition_state():613] send defer: 3
+ 2024-05-23 13:04:59,778 DEBUG HandlerThread:1586 [handler.py:handle_request():158] handle_request: defer
+ 2024-05-23 13:04:59,779 INFO HandlerThread:1586 [handler.py:handle_request_defer():184] handle defer: 3
+ 2024-05-23 13:04:59,779 DEBUG SenderThread:1586 [sender.py:send():378] send: stats
+ 2024-05-23 13:04:59,780 DEBUG SenderThread:1586 [sender.py:send_request():405] send_request: defer
+ 2024-05-23 13:04:59,780 INFO SenderThread:1586 [sender.py:send_request_defer():609] handle sender defer: 3
+ 2024-05-23 13:04:59,780 INFO SenderThread:1586 [sender.py:transition_state():613] send defer: 4
+ 2024-05-23 13:04:59,780 DEBUG HandlerThread:1586 [handler.py:handle_request():158] handle_request: defer
+ 2024-05-23 13:04:59,780 INFO HandlerThread:1586 [handler.py:handle_request_defer():184] handle defer: 4
+ 2024-05-23 13:04:59,780 DEBUG SenderThread:1586 [sender.py:send_request():405] send_request: defer
+ 2024-05-23 13:04:59,780 INFO SenderThread:1586 [sender.py:send_request_defer():609] handle sender defer: 4
+ 2024-05-23 13:04:59,780 INFO SenderThread:1586 [sender.py:transition_state():613] send defer: 5
+ 2024-05-23 13:04:59,780 DEBUG HandlerThread:1586 [handler.py:handle_request():158] handle_request: defer
+ 2024-05-23 13:04:59,780 INFO HandlerThread:1586 [handler.py:handle_request_defer():184] handle defer: 5
+ 2024-05-23 13:04:59,780 DEBUG SenderThread:1586 [sender.py:send():378] send: summary
+ 2024-05-23 13:04:59,781 INFO SenderThread:1586 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end
+ 2024-05-23 13:04:59,781 DEBUG SenderThread:1586 [sender.py:send_request():405] send_request: defer
+ 2024-05-23 13:04:59,781 INFO SenderThread:1586 [sender.py:send_request_defer():609] handle sender defer: 5
+ 2024-05-23 13:04:59,781 INFO SenderThread:1586 [sender.py:transition_state():613] send defer: 6
+ 2024-05-23 13:04:59,781 DEBUG HandlerThread:1586 [handler.py:handle_request():158] handle_request: defer
+ 2024-05-23 13:04:59,782 INFO HandlerThread:1586 [handler.py:handle_request_defer():184] handle defer: 6
+ 2024-05-23 13:04:59,782 DEBUG SenderThread:1586 [sender.py:send_request():405] send_request: defer
+ 2024-05-23 13:04:59,782 INFO SenderThread:1586 [sender.py:send_request_defer():609] handle sender defer: 6
+ 2024-05-23 13:04:59,786 DEBUG HandlerThread:1586 [handler.py:handle_request():158] handle_request: status_report
+ 2024-05-23 13:04:59,874 INFO SenderThread:1586 [sender.py:transition_state():613] send defer: 7
+ 2024-05-23 13:04:59,874 DEBUG HandlerThread:1586 [handler.py:handle_request():158] handle_request: defer
+ 2024-05-23 13:04:59,874 INFO HandlerThread:1586 [handler.py:handle_request_defer():184] handle defer: 7
+ 2024-05-23 13:04:59,874 DEBUG SenderThread:1586 [sender.py:send_request():405] send_request: defer
+ 2024-05-23 13:04:59,874 INFO SenderThread:1586 [sender.py:send_request_defer():609] handle sender defer: 7
+ 2024-05-23 13:05:00,552 INFO Thread-12 :1586 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_130448-vm5e7ag8/files/config.yaml
+ 2024-05-23 13:05:00,552 INFO Thread-12 :1586 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_130448-vm5e7ag8/files/wandb-summary.json
+ 2024-05-23 13:05:00,772 DEBUG HandlerThread:1586 [handler.py:handle_request():158] handle_request: poll_exit
+ 2024-05-23 13:05:01,161 INFO SenderThread:1586 [sender.py:transition_state():613] send defer: 8
+ 2024-05-23 13:05:01,161 DEBUG SenderThread:1586 [sender.py:send_request():405] send_request: poll_exit
+ 2024-05-23 13:05:01,161 DEBUG HandlerThread:1586 [handler.py:handle_request():158] handle_request: defer
+ 2024-05-23 13:05:01,161 INFO HandlerThread:1586 [handler.py:handle_request_defer():184] handle defer: 8
+ 2024-05-23 13:05:01,161 DEBUG SenderThread:1586 [sender.py:send_request():405] send_request: defer
+ 2024-05-23 13:05:01,161 INFO SenderThread:1586 [sender.py:send_request_defer():609] handle sender defer: 8
+ 2024-05-23 13:05:01,161 INFO SenderThread:1586 [job_builder.py:build():432] Attempting to build job artifact
+ 2024-05-23 13:05:01,162 INFO SenderThread:1586 [job_builder.py:_get_source_type():576] no source found
+ 2024-05-23 13:05:01,162 INFO SenderThread:1586 [sender.py:transition_state():613] send defer: 9
+ 2024-05-23 13:05:01,162 DEBUG HandlerThread:1586 [handler.py:handle_request():158] handle_request: defer
+ 2024-05-23 13:05:01,162 INFO HandlerThread:1586 [handler.py:handle_request_defer():184] handle defer: 9
+ 2024-05-23 13:05:01,162 DEBUG SenderThread:1586 [sender.py:send_request():405] send_request: defer
+ 2024-05-23 13:05:01,162 INFO SenderThread:1586 [sender.py:send_request_defer():609] handle sender defer: 9
+ 2024-05-23 13:05:01,162 INFO SenderThread:1586 [dir_watcher.py:finish():358] shutting down directory watcher
+ 2024-05-23 13:05:01,554 INFO SenderThread:1586 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_130448-vm5e7ag8/files/output.log
+ 2024-05-23 13:05:01,554 INFO SenderThread:1586 [dir_watcher.py:finish():388] scan: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_130448-vm5e7ag8/files
+ 2024-05-23 13:05:01,556 INFO SenderThread:1586 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_130448-vm5e7ag8/files/wandb-metadata.json wandb-metadata.json
+ 2024-05-23 13:05:01,556 INFO SenderThread:1586 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_130448-vm5e7ag8/files/output.log output.log
+ 2024-05-23 13:05:01,556 INFO SenderThread:1586 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_130448-vm5e7ag8/files/wandb-summary.json wandb-summary.json
+ 2024-05-23 13:05:01,559 INFO SenderThread:1586 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_130448-vm5e7ag8/files/config.yaml config.yaml
+ 2024-05-23 13:05:01,561 INFO SenderThread:1586 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_130448-vm5e7ag8/files/requirements.txt requirements.txt
+ 2024-05-23 13:05:01,563 INFO SenderThread:1586 [sender.py:transition_state():613] send defer: 10
+ 2024-05-23 13:05:01,563 DEBUG HandlerThread:1586 [handler.py:handle_request():158] handle_request: defer
+ 2024-05-23 13:05:01,564 INFO HandlerThread:1586 [handler.py:handle_request_defer():184] handle defer: 10
+ 2024-05-23 13:05:01,564 DEBUG SenderThread:1586 [sender.py:send_request():405] send_request: defer
+ 2024-05-23 13:05:01,566 INFO SenderThread:1586 [sender.py:send_request_defer():609] handle sender defer: 10
+ 2024-05-23 13:05:01,566 INFO SenderThread:1586 [file_pusher.py:finish():169] shutting down file pusher
+ 2024-05-23 13:05:01,772 DEBUG HandlerThread:1586 [handler.py:handle_request():158] handle_request: poll_exit
+ 2024-05-23 13:05:01,772 DEBUG SenderThread:1586 [sender.py:send_request():405] send_request: poll_exit
+ 2024-05-23 13:05:01,819 INFO wandb-upload_0:1586 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_130448-vm5e7ag8/files/output.log
+ 2024-05-23 13:05:02,170 INFO wandb-upload_2:1586 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_130448-vm5e7ag8/files/config.yaml
+ 2024-05-23 13:05:02,209 INFO wandb-upload_3:1586 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_130448-vm5e7ag8/files/requirements.txt
+ 2024-05-23 13:05:02,275 INFO wandb-upload_1:1586 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_130448-vm5e7ag8/files/wandb-summary.json
+ 2024-05-23 13:05:02,475 INFO Thread-11 (_thread_body):1586 [sender.py:transition_state():613] send defer: 11
+ 2024-05-23 13:05:02,475 DEBUG HandlerThread:1586 [handler.py:handle_request():158] handle_request: defer
+ 2024-05-23 13:05:02,475 INFO HandlerThread:1586 [handler.py:handle_request_defer():184] handle defer: 11
+ 2024-05-23 13:05:02,475 DEBUG SenderThread:1586 [sender.py:send_request():405] send_request: defer
+ 2024-05-23 13:05:02,475 INFO SenderThread:1586 [sender.py:send_request_defer():609] handle sender defer: 11
+ 2024-05-23 13:05:02,476 INFO SenderThread:1586 [file_pusher.py:join():175] waiting for file pusher
+ 2024-05-23 13:05:02,476 INFO SenderThread:1586 [sender.py:transition_state():613] send defer: 12
+ 2024-05-23 13:05:02,476 DEBUG HandlerThread:1586 [handler.py:handle_request():158] handle_request: defer
+ 2024-05-23 13:05:02,476 INFO HandlerThread:1586 [handler.py:handle_request_defer():184] handle defer: 12
+ 2024-05-23 13:05:02,476 DEBUG SenderThread:1586 [sender.py:send_request():405] send_request: defer
+ 2024-05-23 13:05:02,476 INFO SenderThread:1586 [sender.py:send_request_defer():609] handle sender defer: 12
+ 2024-05-23 13:05:02,476 INFO SenderThread:1586 [file_stream.py:finish():601] file stream finish called
+ 2024-05-23 13:05:02,535 INFO SenderThread:1586 [file_stream.py:finish():605] file stream finish is done
+ 2024-05-23 13:05:02,535 INFO SenderThread:1586 [sender.py:transition_state():613] send defer: 13
+ 2024-05-23 13:05:02,535 DEBUG HandlerThread:1586 [handler.py:handle_request():158] handle_request: defer
+ 2024-05-23 13:05:02,535 INFO HandlerThread:1586 [handler.py:handle_request_defer():184] handle defer: 13
+ 2024-05-23 13:05:02,535 DEBUG SenderThread:1586 [sender.py:send_request():405] send_request: defer
+ 2024-05-23 13:05:02,535 INFO SenderThread:1586 [sender.py:send_request_defer():609] handle sender defer: 13
+ 2024-05-23 13:05:02,535 INFO SenderThread:1586 [sender.py:transition_state():613] send defer: 14
+ 2024-05-23 13:05:02,535 DEBUG HandlerThread:1586 [handler.py:handle_request():158] handle_request: defer
+ 2024-05-23 13:05:02,535 INFO HandlerThread:1586 [handler.py:handle_request_defer():184] handle defer: 14
+ 2024-05-23 13:05:02,536 DEBUG SenderThread:1586 [sender.py:send():378] send: final
+ 2024-05-23 13:05:02,536 DEBUG SenderThread:1586 [sender.py:send():378] send: footer
+ 2024-05-23 13:05:02,536 DEBUG SenderThread:1586 [sender.py:send_request():405] send_request: defer
+ 2024-05-23 13:05:02,536 INFO SenderThread:1586 [sender.py:send_request_defer():609] handle sender defer: 14
+ 2024-05-23 13:05:02,536 DEBUG HandlerThread:1586 [handler.py:handle_request():158] handle_request: poll_exit
+ 2024-05-23 13:05:02,536 DEBUG SenderThread:1586 [sender.py:send_request():405] send_request: poll_exit
+ 2024-05-23 13:05:02,537 DEBUG HandlerThread:1586 [handler.py:handle_request():158] handle_request: poll_exit
+ 2024-05-23 13:05:02,537 DEBUG HandlerThread:1586 [handler.py:handle_request():158] handle_request: server_info
+ 2024-05-23 13:05:02,537 DEBUG HandlerThread:1586 [handler.py:handle_request():158] handle_request: get_summary
+ 2024-05-23 13:05:02,537 DEBUG HandlerThread:1586 [handler.py:handle_request():158] handle_request: sampled_history
+ 2024-05-23 13:05:02,537 DEBUG HandlerThread:1586 [handler.py:handle_request():158] handle_request: internal_messages
+ 2024-05-23 13:05:02,537 DEBUG SenderThread:1586 [sender.py:send_request():405] send_request: poll_exit
+ 2024-05-23 13:05:02,537 DEBUG SenderThread:1586 [sender.py:send_request():405] send_request: server_info
+ 2024-05-23 13:05:02,593 INFO MainThread:1586 [wandb_run.py:_footer_history_summary_info():3994] rendering history
+ 2024-05-23 13:05:02,593 INFO MainThread:1586 [wandb_run.py:_footer_history_summary_info():4026] rendering summary
+ 2024-05-23 13:05:02,593 INFO MainThread:1586 [wandb_run.py:_footer_sync_info():3953] logging synced files
+ 2024-05-23 13:05:02,596 DEBUG HandlerThread:1586 [handler.py:handle_request():158] handle_request: shutdown
+ 2024-05-23 13:05:02,596 INFO HandlerThread:1586 [handler.py:finish():882] shutting down handler
+ 2024-05-23 13:05:03,537 INFO WriterThread:1586 [datastore.py:close():296] close: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_130448-vm5e7ag8/run-vm5e7ag8.wandb
+ 2024-05-23 13:05:03,593 INFO SenderThread:1586 [sender.py:finish():1545] shutting down sender
+ 2024-05-23 13:05:03,593 INFO SenderThread:1586 [file_pusher.py:finish():169] shutting down file pusher
+ 2024-05-23 13:05:03,593 INFO SenderThread:1586 [file_pusher.py:join():175] waiting for file pusher
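The `git root error` above is git's dubious-ownership safeguard: the repository is owned by a different user than the probing process, so `git rev-parse --show-toplevel` exits with code 128 and wandb records `'commit': None`. The log itself quotes the remedy; a sketch of applying it programmatically before launching the harness (repo path taken from the log):

```python
import subprocess

# Mark the checkout as safe so wandb's git probe can resolve the commit.
# This runs the exact command the error message above suggests.
REPO = "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness"
subprocess.run(
    ["git", "config", "--global", "--add", "safe.directory", REPO],
    check=True,
)
```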
lm-evaluation-harness/wandb/run-20240523_130448-vm5e7ag8/logs/debug.log ADDED
@@ -0,0 +1,29 @@
+ 2024-05-23 13:04:48,276 INFO MainThread:1431 [wandb_setup.py:_flush():76] Current SDK version is 0.17.0
+ 2024-05-23 13:04:48,276 INFO MainThread:1431 [wandb_setup.py:_flush():76] Configure stats pid to 1431
+ 2024-05-23 13:04:48,276 INFO MainThread:1431 [wandb_setup.py:_flush():76] Loading settings from /root/.config/wandb/settings
+ 2024-05-23 13:04:48,276 INFO MainThread:1431 [wandb_setup.py:_flush():76] Loading settings from /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/settings
+ 2024-05-23 13:04:48,276 INFO MainThread:1431 [wandb_setup.py:_flush():76] Loading settings from environment variables: {}
+ 2024-05-23 13:04:48,276 INFO MainThread:1431 [wandb_setup.py:_flush():76] Applying setup settings: {'_disable_service': False}
+ 2024-05-23 13:04:48,276 WARNING MainThread:1431 [wandb_setup.py:_flush():76] Could not find program at -m lm_eval.__main__
+ 2024-05-23 13:04:48,276 INFO MainThread:1431 [wandb_setup.py:_flush():76] Inferring run settings from compute environment: {'program_relpath': None, 'program': '-m lm_eval.__main__'}
+ 2024-05-23 13:04:48,276 INFO MainThread:1431 [wandb_setup.py:_flush():76] Applying login settings: {}
+ 2024-05-23 13:04:48,276 INFO MainThread:1431 [wandb_init.py:_log_setup():520] Logging user logs to /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_130448-vm5e7ag8/logs/debug.log
+ 2024-05-23 13:04:48,276 INFO MainThread:1431 [wandb_init.py:_log_setup():521] Logging internal logs to /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_130448-vm5e7ag8/logs/debug-internal.log
+ 2024-05-23 13:04:48,276 INFO MainThread:1431 [wandb_init.py:init():560] calling init triggers
+ 2024-05-23 13:04:48,276 INFO MainThread:1431 [wandb_init.py:init():567] wandb.init called with sweep_config: {}
+ config: {}
+ 2024-05-23 13:04:48,276 INFO MainThread:1431 [wandb_init.py:init():610] starting backend
+ 2024-05-23 13:04:48,276 INFO MainThread:1431 [wandb_init.py:init():614] setting up manager
+ 2024-05-23 13:04:48,279 INFO MainThread:1431 [backend.py:_multiprocessing_setup():105] multiprocessing start_methods=fork,spawn,forkserver, using: spawn
+ 2024-05-23 13:04:48,280 INFO MainThread:1431 [wandb_init.py:init():622] backend started and connected
+ 2024-05-23 13:04:48,284 INFO MainThread:1431 [wandb_init.py:init():711] updated telemetry
+ 2024-05-23 13:04:48,292 INFO MainThread:1431 [wandb_init.py:init():744] communicating run to backend with 90.0 second timeout
+ 2024-05-23 13:04:48,542 INFO MainThread:1431 [wandb_run.py:_on_init():2396] communicating current version
+ 2024-05-23 13:04:48,698 INFO MainThread:1431 [wandb_run.py:_on_init():2405] got version response
+ 2024-05-23 13:04:48,698 INFO MainThread:1431 [wandb_init.py:init():795] starting run threads in backend
+ 2024-05-23 13:04:48,977 INFO MainThread:1431 [wandb_run.py:_console_start():2374] atexit reg
+ 2024-05-23 13:04:48,977 INFO MainThread:1431 [wandb_run.py:_redirect():2229] redirect: wrap_raw
+ 2024-05-23 13:04:48,977 INFO MainThread:1431 [wandb_run.py:_redirect():2294] Wrapping output streams.
+ 2024-05-23 13:04:48,977 INFO MainThread:1431 [wandb_run.py:_redirect():2319] Redirects installed.
+ 2024-05-23 13:04:48,980 INFO MainThread:1431 [wandb_init.py:init():838] run started, returning control to user process
+ 2024-05-23 13:05:03,597 WARNING MsgRouterThr:1431 [router.py:message_loop():77] message_loop has been closed
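Per the run metadata, this run was configured through `--wandb_args project=bharatgpt,group=trial_expt_2`, which the harness forwards to `wandb.init`. A minimal sketch of the equivalent direct call (project and group names taken from the log):

```python
import wandb

# Equivalent of the harness's --wandb_args for this run.
run = wandb.init(project="bharatgpt", group="trial_expt_2")
# ... evaluation happens here; this particular run exited after ~11 s with code 1 ...
run.finish(exit_code=1)
```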
lm-evaluation-harness/wandb/run-20240523_130448-vm5e7ag8/run-vm5e7ag8.wandb ADDED
Binary file (11.2 kB).
 
lm-evaluation-harness/wandb/run-20240530_125856-v5b29ywz/run-v5b29ywz.wandb ADDED
Binary file (962 kB).
 
venv/lib/python3.10/site-packages/transformers/models/convnextv2/__init__.py ADDED
@@ -0,0 +1,97 @@
+ # flake8: noqa
+ # There's no way to ignore "F401 '...' imported but unused" warnings in this
+ # module, but to preserve other warnings. So, don't check this module at all.
+
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ from typing import TYPE_CHECKING
+
+ # rely on isort to merge the imports
+ from ...utils import (
+     OptionalDependencyNotAvailable,
+     _LazyModule,
+     is_torch_available,
+     is_tf_available,
+ )
+
+
+ _import_structure = {
+     "configuration_convnextv2": [
+         "CONVNEXTV2_PRETRAINED_CONFIG_ARCHIVE_MAP",
+         "ConvNextV2Config",
+     ]
+ }
+
+ try:
+     if not is_torch_available():
+         raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+     pass
+ else:
+     _import_structure["modeling_convnextv2"] = [
+         "CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST",
+         "ConvNextV2ForImageClassification",
+         "ConvNextV2Model",
+         "ConvNextV2PreTrainedModel",
+         "ConvNextV2Backbone",
+     ]
+
+ try:
+     if not is_tf_available():
+         raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+     pass
+ else:
+     _import_structure["modeling_tf_convnextv2"] = [
+         "TFConvNextV2ForImageClassification",
+         "TFConvNextV2Model",
+         "TFConvNextV2PreTrainedModel",
+     ]
+
+ if TYPE_CHECKING:
+     from .configuration_convnextv2 import (
+         CONVNEXTV2_PRETRAINED_CONFIG_ARCHIVE_MAP,
+         ConvNextV2Config,
+     )
+
+     try:
+         if not is_torch_available():
+             raise OptionalDependencyNotAvailable()
+     except OptionalDependencyNotAvailable:
+         pass
+     else:
+         from .modeling_convnextv2 import (
+             CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST,
+             ConvNextV2Backbone,
+             ConvNextV2ForImageClassification,
+             ConvNextV2Model,
+             ConvNextV2PreTrainedModel,
+         )
+
+     try:
+         if not is_tf_available():
+             raise OptionalDependencyNotAvailable()
+     except OptionalDependencyNotAvailable:
+         pass
+     else:
+         from .modeling_tf_convnextv2 import (
+             TFConvNextV2ForImageClassification,
+             TFConvNextV2Model,
+             TFConvNextV2PreTrainedModel,
+         )
+
+ else:
+     import sys
+
+     sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
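Because of the `_LazyModule` registration above, importing the configuration never touches torch or TensorFlow; the modeling modules are resolved only when first accessed. A small usage sketch under that assumption:

```python
from transformers.utils import is_torch_available
from transformers.models.convnextv2 import ConvNextV2Config

config = ConvNextV2Config()  # cheap: no torch/TF import is triggered

if is_torch_available():
    # Resolving this attribute is what finally imports modeling_convnextv2.
    from transformers.models.convnextv2 import ConvNextV2Model

    model = ConvNextV2Model(config)
```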
venv/lib/python3.10/site-packages/transformers/models/convnextv2/configuration_convnextv2.py ADDED
@@ -0,0 +1,117 @@
+ # coding=utf-8
+ # Copyright 2023 Meta Platforms, Inc. and The HuggingFace Inc. team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """ ConvNeXTV2 model configuration"""
+
+
+ from ...configuration_utils import PretrainedConfig
+ from ...utils import logging
+ from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
+
+
+ logger = logging.get_logger(__name__)
+
+
+ from ..deprecated._archive_maps import CONVNEXTV2_PRETRAINED_CONFIG_ARCHIVE_MAP  # noqa: F401, E402
+
+
+ class ConvNextV2Config(BackboneConfigMixin, PretrainedConfig):
+     r"""
+     This is the configuration class to store the configuration of a [`ConvNextV2Model`]. It is used to instantiate a
+     ConvNeXTV2 model according to the specified arguments, defining the model architecture. Instantiating a
+     configuration with the defaults will yield a similar configuration to that of the ConvNeXTV2
+     [facebook/convnextv2-tiny-1k-224](https://huggingface.co/facebook/convnextv2-tiny-1k-224) architecture.
+
+     Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+     documentation from [`PretrainedConfig`] for more information.
+
+     Args:
+         num_channels (`int`, *optional*, defaults to 3):
+             The number of input channels.
+         patch_size (`int`, *optional*, defaults to 4):
+             Patch size to use in the patch embedding layer.
+         num_stages (`int`, *optional*, defaults to 4):
+             The number of stages in the model.
+         hidden_sizes (`List[int]`, *optional*, defaults to `[96, 192, 384, 768]`):
+             Dimensionality (hidden size) at each stage.
+         depths (`List[int]`, *optional*, defaults to `[3, 3, 9, 3]`):
+             Depth (number of blocks) for each stage.
+         hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
+             The non-linear activation function (function or string) in each block. If string, `"gelu"`, `"relu"`,
+             `"selu"` and `"gelu_new"` are supported.
+         initializer_range (`float`, *optional*, defaults to 0.02):
+             The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+         layer_norm_eps (`float`, *optional*, defaults to 1e-12):
+             The epsilon used by the layer normalization layers.
+         drop_path_rate (`float`, *optional*, defaults to 0.0):
+             The drop rate for stochastic depth.
+         out_features (`List[str]`, *optional*):
+             If used as backbone, list of features to output. Can be any of `"stem"`, `"stage1"`, `"stage2"`, etc.
+             (depending on how many stages the model has). If unset and `out_indices` is set, will default to the
+             corresponding stages. If unset and `out_indices` is unset, will default to the last stage. Must be in the
+             same order as defined in the `stage_names` attribute.
+         out_indices (`List[int]`, *optional*):
+             If used as backbone, list of indices of features to output. Can be any of 0, 1, 2, etc. (depending on how
+             many stages the model has). If unset and `out_features` is set, will default to the corresponding stages.
+             If unset and `out_features` is unset, will default to the last stage. Must be in the
+             same order as defined in the `stage_names` attribute.
+
+     Example:
+     ```python
+     >>> from transformers import ConvNextV2Config, ConvNextV2Model
+
+     >>> # Initializing a ConvNeXTV2 convnextv2-tiny-1k-224 style configuration
+     >>> configuration = ConvNextV2Config()
+
+     >>> # Initializing a model (with random weights) from the convnextv2-tiny-1k-224 style configuration
+     >>> model = ConvNextV2Model(configuration)
+
+     >>> # Accessing the model configuration
+     >>> configuration = model.config
+     ```"""
+
+     model_type = "convnextv2"
+
+     def __init__(
+         self,
+         num_channels=3,
+         patch_size=4,
+         num_stages=4,
+         hidden_sizes=None,
+         depths=None,
+         hidden_act="gelu",
+         initializer_range=0.02,
+         layer_norm_eps=1e-12,
+         drop_path_rate=0.0,
+         image_size=224,
+         out_features=None,
+         out_indices=None,
+         **kwargs,
+     ):
+         super().__init__(**kwargs)
+
+         self.num_channels = num_channels
+         self.patch_size = patch_size
+         self.num_stages = num_stages
+         self.hidden_sizes = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
+         self.depths = [3, 3, 9, 3] if depths is None else depths
+         self.hidden_act = hidden_act
+         self.initializer_range = initializer_range
+         self.layer_norm_eps = layer_norm_eps
+         self.drop_path_rate = drop_path_rate
+         self.image_size = image_size
+         self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
+         self._out_features, self._out_indices = get_aligned_output_features_output_indices(
+             out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
+         )
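Since `get_aligned_output_features_output_indices` reconciles `out_features` and `out_indices` against `stage_names`, supplying either one is enough when the model is used as a backbone. A hedged sketch with the default four-stage layout (exact container types of the aligned values may vary across transformers versions):

```python
from transformers import ConvNextV2Config

# Request features by stage name; the matching indices are derived for us.
config = ConvNextV2Config(out_features=["stage2", "stage4"])

print(config.stage_names)   # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']
print(config.out_features)  # ['stage2', 'stage4']
print(config.out_indices)   # aligned to the requested stages, i.e. 2 and 4
```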
venv/lib/python3.10/site-packages/transformers/models/convnextv2/convert_convnextv2_to_pytorch.py ADDED
@@ -0,0 +1,286 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2023 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Convert ConvNeXTV2 checkpoints from the original repository.
16
+
17
+ URL: https://github.com/facebookresearch/ConvNeXt"""
18
+
19
+ import argparse
20
+ import json
21
+ import os
22
+
23
+ import requests
24
+ import torch
25
+ from huggingface_hub import hf_hub_download
26
+ from PIL import Image
27
+
28
+ from transformers import ConvNextImageProcessor, ConvNextV2Config, ConvNextV2ForImageClassification
29
+ from transformers.image_utils import PILImageResampling
30
+ from transformers.utils import logging
31
+
32
+
33
+ logging.set_verbosity_info()
34
+ logger = logging.get_logger(__name__)
35
+
36
+
37
+ def get_convnextv2_config(checkpoint_url):
38
+ config = ConvNextV2Config()
39
+
40
+ if "atto" in checkpoint_url:
41
+ depths = [2, 2, 6, 2]
42
+ hidden_sizes = [40, 80, 160, 320]
43
+ if "femto" in checkpoint_url:
44
+ depths = [2, 2, 6, 2]
45
+ hidden_sizes = [48, 96, 192, 384]
46
+ if "pico" in checkpoint_url:
47
+ depths = [2, 2, 6, 2]
48
+ hidden_sizes = [64, 128, 256, 512]
49
+ if "nano" in checkpoint_url:
50
+ depths = [2, 2, 8, 2]
51
+ hidden_sizes = [80, 160, 320, 640]
52
+ if "tiny" in checkpoint_url:
53
+ depths = [3, 3, 9, 3]
54
+ hidden_sizes = [96, 192, 384, 768]
55
+ if "base" in checkpoint_url:
56
+ depths = [3, 3, 27, 3]
57
+ hidden_sizes = [128, 256, 512, 1024]
58
+ if "large" in checkpoint_url:
59
+ depths = [3, 3, 27, 3]
60
+ hidden_sizes = [192, 384, 768, 1536]
61
+ if "huge" in checkpoint_url:
62
+ depths = [3, 3, 27, 3]
63
+ hidden_sizes = [352, 704, 1408, 2816]
64
+
65
+ num_labels = 1000
66
+ filename = "imagenet-1k-id2label.json"
67
+ expected_shape = (1, 1000)
68
+
69
+ repo_id = "huggingface/label-files"
70
+ config.num_labels = num_labels
71
+ id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
72
+ id2label = {int(k): v for k, v in id2label.items()}
73
+
74
+ config.id2label = id2label
75
+ config.label2id = {v: k for k, v in id2label.items()}
76
+ config.hidden_sizes = hidden_sizes
77
+ config.depths = depths
78
+
79
+ return config, expected_shape
80
+
81
+
82
+ def rename_key(name):
83
+ if "downsample_layers.0.0" in name:
84
+ name = name.replace("downsample_layers.0.0", "embeddings.patch_embeddings")
85
+ if "downsample_layers.0.1" in name:
86
+ name = name.replace("downsample_layers.0.1", "embeddings.norm") # we rename to layernorm later on
87
+ if "downsample_layers.1.0" in name:
88
+ name = name.replace("downsample_layers.1.0", "stages.1.downsampling_layer.0")
89
+ if "downsample_layers.1.1" in name:
90
+ name = name.replace("downsample_layers.1.1", "stages.1.downsampling_layer.1")
91
+ if "downsample_layers.2.0" in name:
92
+ name = name.replace("downsample_layers.2.0", "stages.2.downsampling_layer.0")
93
+ if "downsample_layers.2.1" in name:
94
+ name = name.replace("downsample_layers.2.1", "stages.2.downsampling_layer.1")
95
+ if "downsample_layers.3.0" in name:
96
+ name = name.replace("downsample_layers.3.0", "stages.3.downsampling_layer.0")
97
+ if "downsample_layers.3.1" in name:
98
+ name = name.replace("downsample_layers.3.1", "stages.3.downsampling_layer.1")
99
+ if "stages" in name and "downsampling_layer" not in name:
100
+ # stages.0.0. for instance should be renamed to stages.0.layers.0.
101
+ name = name[: len("stages.0")] + ".layers" + name[len("stages.0") :]
102
+ if "gamma" in name:
103
+ name = name.replace("gamma", "weight")
104
+ if "beta" in name:
105
+ name = name.replace("beta", "bias")
106
+ if "stages" in name:
107
+ name = name.replace("stages", "encoder.stages")
108
+ if "norm" in name:
109
+ name = name.replace("norm", "layernorm")
110
+ if "head" in name:
111
+ name = name.replace("head", "classifier")
112
+
113
+ return name
114
+
115
+
116
+ # We will verify our results on an image of cute cats
117
+ def prepare_img():
118
+ url = "http://images.cocodataset.org/val2017/000000039769.jpg"
119
+ im = Image.open(requests.get(url, stream=True).raw)
120
+ return im
121
+
122
+
123
+ def convert_preprocessor(checkpoint_url):
124
+ if "224" in checkpoint_url:
125
+ size = 224
126
+ crop_pct = 224 / 256
127
+ elif "384" in checkpoint_url:
128
+ size = 384
129
+ crop_pct = None
130
+ else:
131
+ size = 512
132
+ crop_pct = None
133
+
134
+ return ConvNextImageProcessor(
135
+ size=size,
136
+ crop_pct=crop_pct,
137
+ image_mean=[0.485, 0.456, 0.406],
138
+ image_std=[0.229, 0.224, 0.225],
139
+ resample=PILImageResampling.BICUBIC,
140
+ )
141
+
142
+
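One note on `crop_pct`: by the usual ConvNeXt/timm convention (an assumption here, not something this script states), the 224-px path resizes the shorter edge to `size / crop_pct` and then center-crops, while the 384/512 paths resize directly to the target size:

```python
# Hedged arithmetic sketch of the 224-px preprocessing path above.
size = 224
crop_pct = 224 / 256
shorter_edge = int(size / crop_pct)  # 256: resize shorter edge, then center-crop to 224
assert shorter_edge == 256
```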
143
+ @torch.no_grad()
144
+ def convert_convnextv2_checkpoint(checkpoint_url, pytorch_dump_folder_path, save_model, push_to_hub):
145
+ """
146
+ Copy/paste/tweak model's weights to our ConvNeXTV2 structure.
147
+ """
148
+ print("Downloading original model from checkpoint...")
149
+ # define ConvNeXTV2 configuration based on URL
150
+ config, expected_shape = get_convnextv2_config(checkpoint_url)
151
+ # load original state_dict from URL
152
+ state_dict = torch.hub.load_state_dict_from_url(checkpoint_url)["model"]
153
+
154
+ print("Converting model parameters...")
155
+ # rename keys
156
+ for key in state_dict.copy().keys():
157
+ val = state_dict.pop(key)
158
+ state_dict[rename_key(key)] = val
159
+ # add prefix to all keys except the classifier head
160
+ for key in state_dict.copy().keys():
161
+ val = state_dict.pop(key)
162
+ if not key.startswith("classifier"):
163
+ key = "convnextv2." + key
164
+ state_dict[key] = val
165
+
166
+ # load HuggingFace model
167
+ model = ConvNextV2ForImageClassification(config)
168
+ model.load_state_dict(state_dict)
169
+ model.eval()
170
+
171
+ # Check outputs on an image, prepared by ConvNextImageProcessor
172
+ preprocessor = convert_preprocessor(checkpoint_url)
173
+ inputs = preprocessor(images=prepare_img(), return_tensors="pt")
174
+ logits = model(**inputs).logits
175
+
176
+ # note: the logits below were obtained without center cropping
177
+ if checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnextv2/im1k/convnextv2_atto_1k_224_ema.pt":
178
+ expected_logits = torch.tensor([-0.3930, 0.1747, -0.5246, 0.4177, 0.4295])
179
+ elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnextv2/im1k/convnextv2_femto_1k_224_ema.pt":
180
+ expected_logits = torch.tensor([-0.1727, -0.5341, -0.7818, -0.4745, -0.6566])
181
+ elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnextv2/im1k/convnextv2_pico_1k_224_ema.pt":
182
+ expected_logits = torch.tensor([-0.0333, 0.1563, -0.9137, 0.1054, 0.0381])
183
+ elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnextv2/im1k/convnextv2_nano_1k_224_ema.pt":
184
+ expected_logits = torch.tensor([-0.1744, -0.1555, -0.0713, 0.0950, -0.1431])
185
+ elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnextv2/im1k/convnextv2_tiny_1k_224_ema.pt":
186
+ expected_logits = torch.tensor([0.9996, 0.1966, -0.4386, -0.3472, 0.6661])
187
+ elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnextv2/im1k/convnextv2_base_1k_224_ema.pt":
188
+ expected_logits = torch.tensor([-0.2553, -0.6708, -0.1359, 0.2518, -0.2488])
189
+ elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnextv2/im1k/convnextv2_large_1k_224_ema.pt":
190
+ expected_logits = torch.tensor([-0.0673, -0.5627, -0.3753, -0.2722, 0.0178])
191
+ elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnextv2/im1k/convnextv2_huge_1k_224_ema.pt":
192
+ expected_logits = torch.tensor([-0.6377, -0.7458, -0.2150, 0.1184, -0.0597])
193
+ elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_nano_22k_224_ema.pt":
194
+ expected_logits = torch.tensor([1.0799, 0.2322, -0.8860, 1.0219, 0.6231])
195
+ elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_nano_22k_384_ema.pt":
196
+ expected_logits = torch.tensor([0.3766, 0.4917, -1.1426, 0.9942, 0.6024])
197
+ elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_tiny_22k_224_ema.pt":
198
+ expected_logits = torch.tensor([0.4220, -0.6919, -0.4317, -0.2881, -0.6609])
199
+ elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_tiny_22k_384_ema.pt":
200
+ expected_logits = torch.tensor([0.1082, -0.8286, -0.5095, 0.4681, -0.8085])
201
+ elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_base_22k_224_ema.pt":
202
+ expected_logits = torch.tensor([-0.2419, -0.6221, 0.2176, -0.0980, -0.7527])
203
+ elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_base_22k_384_ema.pt":
204
+ expected_logits = torch.tensor([0.0391, -0.4371, 0.3786, 0.1251, -0.2784])
205
+ elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_large_22k_224_ema.pt":
206
+ expected_logits = torch.tensor([-0.0504, 0.5636, -0.1729, -0.6507, -0.3949])
207
+ elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_large_22k_384_ema.pt":
208
+ expected_logits = torch.tensor([0.3560, 0.9486, 0.3149, -0.2667, -0.5138])
209
+ elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_huge_22k_384_ema.pt":
210
+ expected_logits = torch.tensor([-0.2469, -0.4550, -0.5853, -0.0810, 0.0309])
211
+ elif checkpoint_url == "https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_huge_22k_512_ema.pt":
212
+ expected_logits = torch.tensor([-0.3090, 0.0802, -0.0682, -0.1979, -0.2826])
213
+ else:
214
+ raise ValueError(f"Unknown URL: {checkpoint_url}")
215
+
216
+ assert torch.allclose(logits[0, :5], expected_logits, atol=1e-3)
217
+ assert logits.shape == expected_shape
218
+ print("Model outputs match the original results!")
219
+
220
+ if save_model:
221
+ print("Saving model locally...")
222
+ # Create folder to save model
223
+ if not os.path.isdir(pytorch_dump_folder_path):
224
+ os.mkdir(pytorch_dump_folder_path)
225
+
226
+ model.save_pretrained(pytorch_dump_folder_path)
227
+ preprocessor.save_pretrained(pytorch_dump_folder_path)
228
+
229
+ model_name = "convnextv2"
230
+ if "atto" in checkpoint_url:
231
+ model_name += "-atto"
232
+ if "femto" in checkpoint_url:
233
+ model_name += "-femto"
234
+ if "pico" in checkpoint_url:
235
+ model_name += "-pico"
236
+ if "nano" in checkpoint_url:
237
+ model_name += "-nano"
238
+ elif "tiny" in checkpoint_url:
239
+ model_name += "-tiny"
240
+ elif "base" in checkpoint_url:
241
+ model_name += "-base"
242
+ elif "large" in checkpoint_url:
243
+ model_name += "-large"
244
+ elif "huge" in checkpoint_url:
245
+ model_name += "-huge"
246
+ if "22k" in checkpoint_url and "1k" not in checkpoint_url:
247
+ model_name += "-22k"
248
+ elif "22k" in checkpoint_url and "1k" in checkpoint_url:
249
+ model_name += "-22k-1k"
250
+ elif "1k" in checkpoint_url:
251
+ model_name += "-1k"
252
+ if "224" in checkpoint_url:
253
+ model_name += "-224"
254
+ elif "384" in checkpoint_url:
255
+ model_name += "-384"
256
+ elif "512" in checkpoint_url:
257
+ model_name += "-512"
258
+
259
+ if push_to_hub:
260
+ print(f"Pushing {model_name} to the hub...")
261
+ model.push_to_hub(model_name)
262
+ preprocessor.push_to_hub(model_name)
263
+
264
+
265
+ if __name__ == "__main__":
266
+ parser = argparse.ArgumentParser()
267
+ # Required parameters
268
+ parser.add_argument(
269
+ "--checkpoint_url",
270
+ default="https://dl.fbaipublicfiles.com/convnext/convnextv2/im1k/convnextv2_atto_1k_224_ema.pt",
271
+ type=str,
272
+ help="URL of the original ConvNeXTV2 checkpoint you'd like to convert.",
273
+ )
274
+ parser.add_argument(
275
+ "--pytorch_dump_folder_path",
276
+ default="model",
277
+ type=str,
278
+ help="Path to the output PyTorch model directory.",
279
+ )
280
+ parser.add_argument("--save_model", action="store_true", help="Save the converted model to a local directory")
281
+ parser.add_argument("--push_to_hub", action="store_true", help="Push model and image preprocessor to the hub")
282
+
283
+ args = parser.parse_args()
284
+ convert_convnextv2_checkpoint(
285
+ args.checkpoint_url, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub
286
+ )
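A hedged end-to-end sketch of driving the conversion from Python rather than the CLI (assumes it runs inside this script's module, so the function and its imports are in scope, and that the checkpoint URL is reachable):

```python
# Hedged usage sketch; equivalent to the argparse defaults plus --save_model.
convert_convnextv2_checkpoint(
    checkpoint_url="https://dl.fbaipublicfiles.com/convnext/convnextv2/im1k/convnextv2_atto_1k_224_ema.pt",
    pytorch_dump_folder_path="model",
    save_model=True,
    push_to_hub=False,
)
```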
venv/lib/python3.10/site-packages/transformers/models/convnextv2/modeling_convnextv2.py ADDED
@@ -0,0 +1,574 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 Meta Platforms, Inc. and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch ConvNextV2 model."""
16
+
17
+
18
+ from typing import Optional, Tuple, Union
19
+
20
+ import torch
21
+ import torch.utils.checkpoint
22
+ from torch import nn
23
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
24
+
25
+ from ...activations import ACT2FN
26
+ from ...modeling_outputs import (
27
+ BackboneOutput,
28
+ BaseModelOutputWithNoAttention,
29
+ BaseModelOutputWithPoolingAndNoAttention,
30
+ ImageClassifierOutputWithNoAttention,
31
+ )
32
+ from ...modeling_utils import PreTrainedModel
33
+ from ...utils import (
34
+ add_code_sample_docstrings,
35
+ add_start_docstrings,
36
+ add_start_docstrings_to_model_forward,
37
+ logging,
38
+ replace_return_docstrings,
39
+ )
40
+ from ...utils.backbone_utils import BackboneMixin
41
+ from .configuration_convnextv2 import ConvNextV2Config
42
+
43
+
44
+ logger = logging.get_logger(__name__)
45
+
46
+ # General docstring
47
+ _CONFIG_FOR_DOC = "ConvNextV2Config"
48
+
49
+ # Base docstring
50
+ _CHECKPOINT_FOR_DOC = "facebook/convnextv2-tiny-1k-224"
51
+ _EXPECTED_OUTPUT_SHAPE = [1, 768, 7, 7]
52
+
53
+ # Image classification docstring
54
+ _IMAGE_CLASS_CHECKPOINT = "facebook/convnextv2-tiny-1k-224"
55
+ _IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"
56
+
57
+
58
+ from ..deprecated._archive_maps import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
59
+
60
+
61
+ # Copied from transformers.models.beit.modeling_beit.drop_path
62
+ def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
63
+ """
64
+ Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
65
+
66
+ Comment by Ross Wightman: This is the same as the DropConnect impl I created for EfficientNet, etc networks,
67
+ however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
68
+ See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the
69
+ layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the
70
+ argument.
71
+ """
72
+ if drop_prob == 0.0 or not training:
73
+ return input
74
+ keep_prob = 1 - drop_prob
75
+ shape = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
76
+ random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
77
+ random_tensor.floor_() # binarize
78
+ output = input.div(keep_prob) * random_tensor
79
+ return output
80
+
81
+
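Because survivors are rescaled by `1 / keep_prob`, `drop_path` preserves the expected activation; a hedged empirical check (not library code):

```python
# Hedged sketch: rescaling kept samples by 1/keep_prob keeps E[output] == input.
import torch

x = torch.ones(100_000, 4)
out = drop_path(x, drop_prob=0.2, training=True)
print(out.mean().item())  # close to 1.0; exact value varies run to run
```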
82
+ # Copied from transformers.models.beit.modeling_beit.BeitDropPath with Beit->ConvNextV2
83
+ class ConvNextV2DropPath(nn.Module):
84
+ """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""
85
+
86
+ def __init__(self, drop_prob: Optional[float] = None) -> None:
87
+ super().__init__()
88
+ self.drop_prob = drop_prob
89
+
90
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
91
+ return drop_path(hidden_states, self.drop_prob, self.training)
92
+
93
+ def extra_repr(self) -> str:
94
+ return "p={}".format(self.drop_prob)
95
+
96
+
97
+ class ConvNextV2GRN(nn.Module):
98
+ """GRN (Global Response Normalization) layer"""
99
+
100
+ def __init__(self, dim: int):
101
+ super().__init__()
102
+ self.weight = nn.Parameter(torch.zeros(1, 1, 1, dim))
103
+ self.bias = nn.Parameter(torch.zeros(1, 1, 1, dim))
104
+
105
+ def forward(self, hidden_states: torch.FloatTensor) -> torch.FloatTensor:
106
+ # Compute and normalize global spatial feature maps
107
+ global_features = torch.norm(hidden_states, p=2, dim=(1, 2), keepdim=True)
108
+ norm_features = global_features / (global_features.mean(dim=-1, keepdim=True) + 1e-6)
109
+ hidden_states = self.weight * (hidden_states * norm_features) + self.bias + hidden_states
110
+
111
+ return hidden_states
112
+
113
+
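To make the two reductions in `forward` concrete, a hedged shape sketch on a channels-last input:

```python
# Hedged shape sketch for ConvNextV2GRN; assumes the class defined above.
import torch

grn = ConvNextV2GRN(dim=64)
x = torch.randn(2, 7, 7, 64)                      # (batch, H, W, channels)
g = torch.norm(x, p=2, dim=(1, 2), keepdim=True)  # (2, 1, 1, 64): L2 norm over H, W
n = g / (g.mean(dim=-1, keepdim=True) + 1e-6)     # divisive normalization across channels
assert grn(x).shape == x.shape
```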
114
+ # Copied from transformers.models.convnext.modeling_convnext.ConvNextLayerNorm with ConvNext->ConvNextV2
115
+ class ConvNextV2LayerNorm(nn.Module):
116
+ r"""LayerNorm that supports two data formats: channels_last (default) or channels_first.
117
+ The ordering of the dimensions in the inputs. channels_last corresponds to inputs with shape (batch_size, height,
118
+ width, channels) while channels_first corresponds to inputs with shape (batch_size, channels, height, width).
119
+ """
120
+
121
+ def __init__(self, normalized_shape, eps=1e-6, data_format="channels_last"):
122
+ super().__init__()
123
+ self.weight = nn.Parameter(torch.ones(normalized_shape))
124
+ self.bias = nn.Parameter(torch.zeros(normalized_shape))
125
+ self.eps = eps
126
+ self.data_format = data_format
127
+ if self.data_format not in ["channels_last", "channels_first"]:
128
+ raise NotImplementedError(f"Unsupported data format: {self.data_format}")
129
+ self.normalized_shape = (normalized_shape,)
130
+
131
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
132
+ if self.data_format == "channels_last":
133
+ x = torch.nn.functional.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps)
134
+ elif self.data_format == "channels_first":
135
+ input_dtype = x.dtype
136
+ x = x.float()
137
+ u = x.mean(1, keepdim=True)
138
+ s = (x - u).pow(2).mean(1, keepdim=True)
139
+ x = (x - u) / torch.sqrt(s + self.eps)
140
+ x = x.to(dtype=input_dtype)
141
+ x = self.weight[:, None, None] * x + self.bias[:, None, None]
142
+ return x
143
+
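The `channels_first` branch is just LayerNorm taken over the channel dimension; a hedged equivalence check against the built-in `layer_norm` after a permute:

```python
# Hedged equivalence check, assuming the class defined above.
import torch

ln = ConvNextV2LayerNorm(32, data_format="channels_first")
x = torch.randn(1, 32, 7, 7)
ref = torch.nn.functional.layer_norm(
    x.permute(0, 2, 3, 1), ln.normalized_shape, ln.weight, ln.bias, ln.eps
)
assert torch.allclose(ln(x), ref.permute(0, 3, 1, 2), atol=1e-5)
```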
144
+
145
+ # Copied from transformers.models.convnext.modeling_convnext.ConvNextEmbeddings with ConvNext->ConvNextV2
146
+ class ConvNextV2Embeddings(nn.Module):
147
+ """This class is comparable to (and inspired by) the SwinEmbeddings class
148
+ found in src/transformers/models/swin/modeling_swin.py.
149
+ """
150
+
151
+ def __init__(self, config):
152
+ super().__init__()
153
+ self.patch_embeddings = nn.Conv2d(
154
+ config.num_channels, config.hidden_sizes[0], kernel_size=config.patch_size, stride=config.patch_size
155
+ )
156
+ self.layernorm = ConvNextV2LayerNorm(config.hidden_sizes[0], eps=1e-6, data_format="channels_first")
157
+ self.num_channels = config.num_channels
158
+
159
+ def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor:
160
+ num_channels = pixel_values.shape[1]
161
+ if num_channels != self.num_channels:
162
+ raise ValueError(
163
+ "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
164
+ )
165
+ embeddings = self.patch_embeddings(pixel_values)
166
+ embeddings = self.layernorm(embeddings)
167
+ return embeddings
168
+
169
+
170
+ class ConvNextV2Layer(nn.Module):
171
+ """This corresponds to the `Block` class in the original implementation.
172
+
173
+ There are two equivalent implementations: [DwConv, LayerNorm (channels_first), Conv, GELU,1x1 Conv]; all in (N, C,
174
+ H, W) (2) [DwConv, Permute to (N, H, W, C), LayerNorm (channels_last), Linear, GELU, Linear]; Permute back
175
+
176
+ The authors used (2) as they find it slightly faster in PyTorch.
177
+
178
+ Args:
179
+ config ([`ConvNextV2Config`]): Model configuration class.
180
+ dim (`int`): Number of input channels.
181
+ drop_path (`float`): Stochastic depth rate. Default: 0.0.
182
+ """
183
+
184
+ def __init__(self, config, dim, drop_path=0):
185
+ super().__init__()
186
+ # depthwise conv
187
+ self.dwconv = nn.Conv2d(dim, dim, kernel_size=7, padding=3, groups=dim)
188
+ self.layernorm = ConvNextV2LayerNorm(dim, eps=1e-6)
189
+ # pointwise/1x1 convs, implemented with linear layers
190
+ self.pwconv1 = nn.Linear(dim, 4 * dim)
191
+ self.act = ACT2FN[config.hidden_act]
192
+ self.grn = ConvNextV2GRN(4 * dim)
193
+ self.pwconv2 = nn.Linear(4 * dim, dim)
194
+ self.drop_path = ConvNextV2DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
195
+
196
+ def forward(self, hidden_states: torch.FloatTensor) -> torch.Tensor:
197
+ input = hidden_states
198
+ x = self.dwconv(hidden_states)
199
+ # (batch_size, num_channels, height, width) -> (batch_size, height, width, num_channels)
200
+ x = x.permute(0, 2, 3, 1)
201
+ x = self.layernorm(x)
202
+ x = self.pwconv1(x)
203
+ x = self.act(x)
204
+ x = self.grn(x)
205
+ x = self.pwconv2(x)
206
+ # (batch_size, height, width, num_channels) -> (batch_size, num_channels, height, width)
207
+ x = x.permute(0, 3, 1, 2)
208
+
209
+ x = input + self.drop_path(x)
210
+ return x
211
+
212
+
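A hedged smoke test of the block above: it takes and returns NCHW tensors of the same shape, with the NHWC permutes handled internally (uses the public `ConvNextV2Config` with its default `hidden_act`):

```python
# Hedged smoke test, not library code; assumes ConvNextV2Layer from above is in scope.
import torch
from transformers import ConvNextV2Config

layer = ConvNextV2Layer(ConvNextV2Config(), dim=96)
x = torch.randn(1, 96, 56, 56)  # NCHW in, NCHW out
assert layer(x).shape == x.shape
```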
213
+ # Copied from transformers.models.convnext.modeling_convnext.ConvNextStage with ConvNeXT->ConvNeXTV2, ConvNext->ConvNextV2
214
+ class ConvNextV2Stage(nn.Module):
215
+ """ConvNeXTV2 stage, consisting of an optional downsampling layer + multiple residual blocks.
216
+
217
+ Args:
218
+ config ([`ConvNextV2Config`]): Model configuration class.
219
+ in_channels (`int`): Number of input channels.
220
+ out_channels (`int`): Number of output channels.
221
+ depth (`int`): Number of residual blocks.
222
+ drop_path_rates (`List[float]`): Stochastic depth rates for each layer.
223
+ """
224
+
225
+ def __init__(self, config, in_channels, out_channels, kernel_size=2, stride=2, depth=2, drop_path_rates=None):
226
+ super().__init__()
227
+
228
+ if in_channels != out_channels or stride > 1:
229
+ self.downsampling_layer = nn.Sequential(
230
+ ConvNextV2LayerNorm(in_channels, eps=1e-6, data_format="channels_first"),
231
+ nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, stride=stride),
232
+ )
233
+ else:
234
+ self.downsampling_layer = nn.Identity()
235
+ drop_path_rates = drop_path_rates or [0.0] * depth
236
+ self.layers = nn.Sequential(
237
+ *[ConvNextV2Layer(config, dim=out_channels, drop_path=drop_path_rates[j]) for j in range(depth)]
238
+ )
239
+
240
+ def forward(self, hidden_states: torch.FloatTensor) -> torch.Tensor:
241
+ hidden_states = self.downsampling_layer(hidden_states)
242
+ hidden_states = self.layers(hidden_states)
243
+ return hidden_states
244
+
245
+
246
+ # Copied from transformers.models.convnext.modeling_convnext.ConvNextEncoder with ConvNext->ConvNextV2
247
+ class ConvNextV2Encoder(nn.Module):
248
+ def __init__(self, config):
249
+ super().__init__()
250
+ self.stages = nn.ModuleList()
251
+ drop_path_rates = [
252
+ x.tolist() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths)).split(config.depths)
253
+ ]
254
+ prev_chs = config.hidden_sizes[0]
255
+ for i in range(config.num_stages):
256
+ out_chs = config.hidden_sizes[i]
257
+ stage = ConvNextV2Stage(
258
+ config,
259
+ in_channels=prev_chs,
260
+ out_channels=out_chs,
261
+ stride=2 if i > 0 else 1,
262
+ depth=config.depths[i],
263
+ drop_path_rates=drop_path_rates[i],
264
+ )
265
+ self.stages.append(stage)
266
+ prev_chs = out_chs
267
+
268
+ def forward(
269
+ self,
270
+ hidden_states: torch.FloatTensor,
271
+ output_hidden_states: Optional[bool] = False,
272
+ return_dict: Optional[bool] = True,
273
+ ) -> Union[Tuple, BaseModelOutputWithNoAttention]:
274
+ all_hidden_states = () if output_hidden_states else None
275
+
276
+ for i, layer_module in enumerate(self.stages):
277
+ if output_hidden_states:
278
+ all_hidden_states = all_hidden_states + (hidden_states,)
279
+
280
+ hidden_states = layer_module(hidden_states)
281
+
282
+ if output_hidden_states:
283
+ all_hidden_states = all_hidden_states + (hidden_states,)
284
+
285
+ if not return_dict:
286
+ return tuple(v for v in [hidden_states, all_hidden_states] if v is not None)
287
+
288
+ return BaseModelOutputWithNoAttention(
289
+ last_hidden_state=hidden_states,
290
+ hidden_states=all_hidden_states,
291
+ )
292
+
293
+
294
+ # Copied from transformers.models.convnext.modeling_convnext.ConvNextPreTrainedModel with ConvNext->ConvNextV2, convnext->convnextv2
295
+ class ConvNextV2PreTrainedModel(PreTrainedModel):
296
+ """
297
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
298
+ models.
299
+ """
300
+
301
+ config_class = ConvNextV2Config
302
+ base_model_prefix = "convnextv2"
303
+ main_input_name = "pixel_values"
304
+
305
+ def _init_weights(self, module):
306
+ """Initialize the weights"""
307
+ if isinstance(module, (nn.Linear, nn.Conv2d)):
308
+ # Slightly different from the TF version which uses truncated_normal for initialization
309
+ # cf https://github.com/pytorch/pytorch/pull/5617
310
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
311
+ if module.bias is not None:
312
+ module.bias.data.zero_()
313
+ elif isinstance(module, nn.LayerNorm):
314
+ module.bias.data.zero_()
315
+ module.weight.data.fill_(1.0)
316
+
317
+
318
+ CONVNEXTV2_START_DOCSTRING = r"""
319
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
320
+ as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and
321
+ behavior.
322
+
323
+ Parameters:
324
+ config ([`ConvNextV2Config`]): Model configuration class with all the parameters of the model.
325
+ Initializing with a config file does not load the weights associated with the model, only the
326
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
327
+ """
328
+
329
+ CONVNEXTV2_INPUTS_DOCSTRING = r"""
330
+ Args:
331
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
332
+ Pixel values. Pixel values can be obtained using [`ConvNextImageProcessor`]. See
333
+ [`ConvNextImageProcessor.__call__`] for details.
334
+ output_hidden_states (`bool`, *optional*):
335
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
336
+ more detail.
337
+ return_dict (`bool`, *optional*):
338
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
339
+ """
340
+
341
+
342
+ @add_start_docstrings(
343
+ "The bare ConvNextV2 model outputting raw features without any specific head on top.",
344
+ CONVNEXTV2_START_DOCSTRING,
345
+ )
346
+ # Copied from transformers.models.convnext.modeling_convnext.ConvNextModel with CONVNEXT->CONVNEXTV2, ConvNext->ConvNextV2
347
+ class ConvNextV2Model(ConvNextV2PreTrainedModel):
348
+ def __init__(self, config):
349
+ super().__init__(config)
350
+ self.config = config
351
+
352
+ self.embeddings = ConvNextV2Embeddings(config)
353
+ self.encoder = ConvNextV2Encoder(config)
354
+
355
+ # final layernorm layer
356
+ self.layernorm = nn.LayerNorm(config.hidden_sizes[-1], eps=config.layer_norm_eps)
357
+
358
+ # Initialize weights and apply final processing
359
+ self.post_init()
360
+
361
+ @add_start_docstrings_to_model_forward(CONVNEXTV2_INPUTS_DOCSTRING)
362
+ @add_code_sample_docstrings(
363
+ checkpoint=_CHECKPOINT_FOR_DOC,
364
+ output_type=BaseModelOutputWithPoolingAndNoAttention,
365
+ config_class=_CONFIG_FOR_DOC,
366
+ modality="vision",
367
+ expected_output=_EXPECTED_OUTPUT_SHAPE,
368
+ )
369
+ def forward(
370
+ self,
371
+ pixel_values: torch.FloatTensor = None,
372
+ output_hidden_states: Optional[bool] = None,
373
+ return_dict: Optional[bool] = None,
374
+ ) -> Union[Tuple, BaseModelOutputWithPoolingAndNoAttention]:
375
+ output_hidden_states = (
376
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
377
+ )
378
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
379
+
380
+ if pixel_values is None:
381
+ raise ValueError("You have to specify pixel_values")
382
+
383
+ embedding_output = self.embeddings(pixel_values)
384
+
385
+ encoder_outputs = self.encoder(
386
+ embedding_output,
387
+ output_hidden_states=output_hidden_states,
388
+ return_dict=return_dict,
389
+ )
390
+
391
+ last_hidden_state = encoder_outputs[0]
392
+
393
+ # global average pooling, (N, C, H, W) -> (N, C)
394
+ pooled_output = self.layernorm(last_hidden_state.mean([-2, -1]))
395
+
396
+ if not return_dict:
397
+ return (last_hidden_state, pooled_output) + encoder_outputs[1:]
398
+
399
+ return BaseModelOutputWithPoolingAndNoAttention(
400
+ last_hidden_state=last_hidden_state,
401
+ pooler_output=pooled_output,
402
+ hidden_states=encoder_outputs.hidden_states,
403
+ )
404
+
405
+
406
+ @add_start_docstrings(
407
+ """
408
+ ConvNextV2 Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
409
+ ImageNet.
410
+ """,
411
+ CONVNEXTV2_START_DOCSTRING,
412
+ )
413
+ # Copied from transformers.models.convnext.modeling_convnext.ConvNextForImageClassification with CONVNEXT->CONVNEXTV2,ConvNext->ConvNextV2,convnext->convnextv2
414
+ class ConvNextV2ForImageClassification(ConvNextV2PreTrainedModel):
415
+ def __init__(self, config):
416
+ super().__init__(config)
417
+
418
+ self.num_labels = config.num_labels
419
+ self.convnextv2 = ConvNextV2Model(config)
420
+
421
+ # Classifier head
422
+ self.classifier = (
423
+ nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity()
424
+ )
425
+
426
+ # Initialize weights and apply final processing
427
+ self.post_init()
428
+
429
+ @add_start_docstrings_to_model_forward(CONVNEXTV2_INPUTS_DOCSTRING)
430
+ @add_code_sample_docstrings(
431
+ checkpoint=_IMAGE_CLASS_CHECKPOINT,
432
+ output_type=ImageClassifierOutputWithNoAttention,
433
+ config_class=_CONFIG_FOR_DOC,
434
+ expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
435
+ )
436
+ def forward(
437
+ self,
438
+ pixel_values: torch.FloatTensor = None,
439
+ labels: Optional[torch.LongTensor] = None,
440
+ output_hidden_states: Optional[bool] = None,
441
+ return_dict: Optional[bool] = None,
442
+ ) -> Union[Tuple, ImageClassifierOutputWithNoAttention]:
443
+ r"""
444
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
445
+ Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
446
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss); if
447
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
448
+ """
449
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
450
+
451
+ outputs = self.convnextv2(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
452
+
453
+ pooled_output = outputs.pooler_output if return_dict else outputs[1]
454
+
455
+ logits = self.classifier(pooled_output)
456
+
457
+ loss = None
458
+ if labels is not None:
459
+ if self.config.problem_type is None:
460
+ if self.num_labels == 1:
461
+ self.config.problem_type = "regression"
462
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
463
+ self.config.problem_type = "single_label_classification"
464
+ else:
465
+ self.config.problem_type = "multi_label_classification"
466
+
467
+ if self.config.problem_type == "regression":
468
+ loss_fct = MSELoss()
469
+ if self.num_labels == 1:
470
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
471
+ else:
472
+ loss = loss_fct(logits, labels)
473
+ elif self.config.problem_type == "single_label_classification":
474
+ loss_fct = CrossEntropyLoss()
475
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
476
+ elif self.config.problem_type == "multi_label_classification":
477
+ loss_fct = BCEWithLogitsLoss()
478
+ loss = loss_fct(logits, labels)
479
+ if not return_dict:
480
+ output = (logits,) + outputs[2:]
481
+ return ((loss,) + output) if loss is not None else output
482
+
483
+ return ImageClassifierOutputWithNoAttention(
484
+ loss=loss,
485
+ logits=logits,
486
+ hidden_states=outputs.hidden_states,
487
+ )
488
+
489
+
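A hedged usage sketch for the classification head, against the tiny checkpoint the docstrings above reference:

```python
# Hedged usage sketch; facebook/convnextv2-tiny-1k-224 is the checkpoint
# referenced by the docstring constants above.
import torch
import requests
from PIL import Image
from transformers import AutoImageProcessor, ConvNextV2ForImageClassification

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
processor = AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224")
model = ConvNextV2ForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224")
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[logits.argmax(-1).item()])  # e.g. "tabby, tabby cat"
```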
490
+ @add_start_docstrings(
491
+ """
492
+ ConvNeXT V2 backbone, to be used with frameworks like DETR and MaskFormer.
493
+ """,
494
+ CONVNEXTV2_START_DOCSTRING,
495
+ )
496
+ # Copied from transformers.models.convnext.modeling_convnext.ConvNextBackbone with CONVNEXT->CONVNEXTV2,ConvNext->ConvNextV2,facebook/convnext-tiny-224->facebook/convnextv2-tiny-1k-224
497
+ class ConvNextV2Backbone(ConvNextV2PreTrainedModel, BackboneMixin):
498
+ def __init__(self, config):
499
+ super().__init__(config)
500
+ super()._init_backbone(config)
501
+
502
+ self.embeddings = ConvNextV2Embeddings(config)
503
+ self.encoder = ConvNextV2Encoder(config)
504
+ self.num_features = [config.hidden_sizes[0]] + config.hidden_sizes
505
+
506
+ # Add layer norms to hidden states of out_features
507
+ hidden_states_norms = {}
508
+ for stage, num_channels in zip(self._out_features, self.channels):
509
+ hidden_states_norms[stage] = ConvNextV2LayerNorm(num_channels, data_format="channels_first")
510
+ self.hidden_states_norms = nn.ModuleDict(hidden_states_norms)
511
+
512
+ # initialize weights and apply final processing
513
+ self.post_init()
514
+
515
+ @add_start_docstrings_to_model_forward(CONVNEXTV2_INPUTS_DOCSTRING)
516
+ @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC)
517
+ def forward(
518
+ self,
519
+ pixel_values: torch.Tensor,
520
+ output_hidden_states: Optional[bool] = None,
521
+ return_dict: Optional[bool] = None,
522
+ ) -> BackboneOutput:
523
+ """
524
+ Returns:
525
+
526
+ Examples:
527
+
528
+ ```python
529
+ >>> from transformers import AutoImageProcessor, AutoBackbone
530
+ >>> import torch
531
+ >>> from PIL import Image
532
+ >>> import requests
533
+
534
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
535
+ >>> image = Image.open(requests.get(url, stream=True).raw)
536
+
537
+ >>> processor = AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224")
538
+ >>> model = AutoBackbone.from_pretrained("facebook/convnextv2-tiny-1k-224")
539
+
540
+ >>> inputs = processor(image, return_tensors="pt")
541
+ >>> outputs = model(**inputs)
542
+ ```"""
543
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
544
+ output_hidden_states = (
545
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
546
+ )
547
+
548
+ embedding_output = self.embeddings(pixel_values)
549
+
550
+ outputs = self.encoder(
551
+ embedding_output,
552
+ output_hidden_states=True,
553
+ return_dict=return_dict,
554
+ )
555
+
556
+ hidden_states = outputs.hidden_states if return_dict else outputs[1]
557
+
558
+ feature_maps = ()
559
+ for stage, hidden_state in zip(self.stage_names, hidden_states):
560
+ if stage in self.out_features:
561
+ hidden_state = self.hidden_states_norms[stage](hidden_state)
562
+ feature_maps += (hidden_state,)
563
+
564
+ if not return_dict:
565
+ output = (feature_maps,)
566
+ if output_hidden_states:
567
+ output += (hidden_states,)
568
+ return output
569
+
570
+ return BackboneOutput(
571
+ feature_maps=feature_maps,
572
+ hidden_states=hidden_states if output_hidden_states else None,
573
+ attentions=None,
574
+ )
venv/lib/python3.10/site-packages/transformers/models/convnextv2/modeling_tf_convnextv2.py ADDED
@@ -0,0 +1,681 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 Meta Platforms Inc. and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ TF 2.0 ConvNextV2 model."""
16
+
17
+
18
+ from __future__ import annotations
19
+
20
+ from typing import List, Optional, Tuple, Union
21
+
22
+ import numpy as np
23
+ import tensorflow as tf
24
+
25
+ from ...activations_tf import get_tf_activation
26
+ from ...modeling_tf_outputs import (
27
+ TFBaseModelOutputWithNoAttention,
28
+ TFBaseModelOutputWithPooling,
29
+ TFBaseModelOutputWithPoolingAndNoAttention,
30
+ TFImageClassifierOutputWithNoAttention,
31
+ )
32
+ from ...modeling_tf_utils import (
33
+ TFModelInputType,
34
+ TFPreTrainedModel,
35
+ TFSequenceClassificationLoss,
36
+ get_initializer,
37
+ keras,
38
+ keras_serializable,
39
+ unpack_inputs,
40
+ )
41
+ from ...tf_utils import shape_list
42
+ from ...utils import (
43
+ add_code_sample_docstrings,
44
+ add_start_docstrings,
45
+ add_start_docstrings_to_model_forward,
46
+ logging,
47
+ )
48
+ from .configuration_convnextv2 import ConvNextV2Config
49
+
50
+
51
+ logger = logging.get_logger(__name__)
52
+
53
+ # General docstring
54
+ _CONFIG_FOR_DOC = "ConvNextV2Config"
55
+
56
+ # Base docstring
57
+ _CHECKPOINT_FOR_DOC = "facebook/convnextv2-tiny-1k-224"
58
+ _EXPECTED_OUTPUT_SHAPE = [1, 768, 7, 7]
59
+
60
+ # Image classification docstring
61
+ _IMAGE_CLASS_CHECKPOINT = "facebook/convnextv2-tiny-1k-224"
62
+ _IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"
63
+
64
+
65
+ # Copied from transformers.models.convnext.modeling_tf_convnext.TFConvNextDropPath with ConvNext->ConvNextV2
66
+ class TFConvNextV2DropPath(keras.layers.Layer):
67
+ """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
68
+ References:
69
+ (1) github.com:rwightman/pytorch-image-models
70
+ """
71
+
72
+ def __init__(self, drop_path: float, **kwargs):
73
+ super().__init__(**kwargs)
74
+ self.drop_path = drop_path
75
+
76
+ def call(self, x: tf.Tensor, training=None):
77
+ if training:
78
+ keep_prob = 1 - self.drop_path
79
+ shape = (tf.shape(x)[0],) + (1,) * (len(tf.shape(x)) - 1)
80
+ random_tensor = keep_prob + tf.random.uniform(shape, 0, 1)
81
+ random_tensor = tf.floor(random_tensor)
82
+ return (x / keep_prob) * random_tensor
83
+ return x
84
+
85
+
86
+ class TFConvNextV2GRN(keras.layers.Layer):
87
+ """GRN (Global Response Normalization) layer"""
88
+
89
+ def __init__(self, config: ConvNextV2Config, dim: int, **kwargs):
90
+ super().__init__(**kwargs)
91
+ self.dim = dim
92
+
93
+ def build(self, input_shape: tf.TensorShape = None):
94
+ # PT's `nn.Parameters` must be mapped to a TF layer weight to inherit the same name hierarchy (and vice-versa)
95
+ self.weight = self.add_weight(
96
+ name="weight",
97
+ shape=(1, 1, 1, self.dim),
98
+ initializer=keras.initializers.Zeros(),
99
+ )
100
+ self.bias = self.add_weight(
101
+ name="bias",
102
+ shape=(1, 1, 1, self.dim),
103
+ initializer=keras.initializers.Zeros(),
104
+ )
105
+ return super().build(input_shape)
106
+
107
+ def call(self, hidden_states: tf.Tensor):
108
+ global_features = tf.norm(hidden_states, ord="euclidean", axis=(1, 2), keepdims=True)
109
+ norm_features = global_features / (tf.reduce_mean(global_features, axis=-1, keepdims=True) + 1e-6)
110
+ hidden_states = self.weight * (hidden_states * norm_features) + self.bias + hidden_states
111
+ return hidden_states
112
+
113
+
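A hedged sanity sketch for the TF GRN (it operates channels-last like its PyTorch counterpart; `config` is unused by the layer body, so `None` suffices for illustration):

```python
# Hedged sanity sketch, not library code; assumes TFConvNextV2GRN from above.
import numpy as np
import tensorflow as tf

grn = TFConvNextV2GRN(config=None, dim=64)
x = tf.constant(np.random.randn(2, 7, 7, 64).astype("float32"))
assert grn(x).shape == x.shape  # weights are built lazily on first call
```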
114
+ # Copied from transformers.models.convnext.modeling_tf_convnext.TFConvNextEmbeddings with ConvNext->ConvNextV2
115
+ class TFConvNextV2Embeddings(keras.layers.Layer):
116
+ """This class is comparable to (and inspired by) the SwinEmbeddings class
117
+ found in src/transformers/models/swin/modeling_swin.py.
118
+ """
119
+
120
+ def __init__(self, config: ConvNextV2Config, **kwargs):
121
+ super().__init__(**kwargs)
122
+ self.patch_embeddings = keras.layers.Conv2D(
123
+ filters=config.hidden_sizes[0],
124
+ kernel_size=config.patch_size,
125
+ strides=config.patch_size,
126
+ name="patch_embeddings",
127
+ kernel_initializer=get_initializer(config.initializer_range),
128
+ bias_initializer=keras.initializers.Zeros(),
129
+ )
130
+ self.layernorm = keras.layers.LayerNormalization(epsilon=1e-6, name="layernorm")
131
+ self.num_channels = config.num_channels
132
+ self.config = config
133
+
134
+ def call(self, pixel_values):
135
+ if isinstance(pixel_values, dict):
136
+ pixel_values = pixel_values["pixel_values"]
137
+
138
+ tf.debugging.assert_equal(
139
+ shape_list(pixel_values)[1],
140
+ self.num_channels,
141
+ message="Make sure that the channel dimension of the pixel values matches the one set in the configuration.",
142
+ )
143
+
144
+ # When running on CPU, `keras.layers.Conv2D` doesn't support `NCHW` format.
145
+ # So change the input format from `NCHW` to `NHWC`.
146
+ # shape = (batch_size, in_height, in_width, in_channels)
147
+ pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1))
148
+
149
+ embeddings = self.patch_embeddings(pixel_values)
150
+ embeddings = self.layernorm(embeddings)
151
+ return embeddings
152
+
153
+ def build(self, input_shape=None):
154
+ if self.built:
155
+ return
156
+ self.built = True
157
+ if getattr(self, "patch_embeddings", None) is not None:
158
+ with tf.name_scope(self.patch_embeddings.name):
159
+ self.patch_embeddings.build([None, None, None, self.config.num_channels])
160
+ if getattr(self, "layernorm", None) is not None:
161
+ with tf.name_scope(self.layernorm.name):
162
+ self.layernorm.build([None, None, None, self.config.hidden_sizes[0]])
163
+
164
+
165
+ class TFConvNextV2Layer(keras.layers.Layer):
166
+ """This corresponds to the `Block` class in the original implementation.
167
+
168
+ There are two equivalent implementations: (1) [DwConv, LayerNorm (channels_first), 1x1 Conv, GELU, 1x1 Conv]; all in
169
+ (N, C, H, W); (2) [DwConv, Permute to (N, H, W, C), LayerNorm (channels_last), Linear, GELU, Linear]; Permute back
170
+
171
+ The authors used (2) as they find it slightly faster in PyTorch. Since we already permuted the inputs to follow
172
+ NHWC ordering, we can just apply the operations straight-away without the permutation.
173
+
174
+ Args:
175
+ config (`ConvNextV2Config`):
176
+ Model configuration class.
177
+ dim (`int`):
178
+ Number of input channels.
179
+ drop_path (`float`, defaults to 0.0):
180
+ Stochastic depth rate.
181
+ """
182
+
183
+ def __init__(self, config: ConvNextV2Config, dim: int, drop_path: float = 0.0, **kwargs):
184
+ super().__init__(**kwargs)
185
+ self.dim = dim
186
+ self.config = config
187
+ self.dwconv = keras.layers.Conv2D(
188
+ filters=dim,
189
+ kernel_size=7,
190
+ padding="same",
191
+ groups=dim,
192
+ kernel_initializer=get_initializer(config.initializer_range),
193
+ bias_initializer=keras.initializers.Zeros(),
194
+ name="dwconv",
195
+ ) # depthwise conv
196
+ self.layernorm = keras.layers.LayerNormalization(
197
+ epsilon=1e-6,
198
+ name="layernorm",
199
+ )
200
+ self.pwconv1 = keras.layers.Dense(
201
+ units=4 * dim,
202
+ kernel_initializer=get_initializer(config.initializer_range),
203
+ bias_initializer=keras.initializers.Zeros(),
204
+ name="pwconv1",
205
+ ) # pointwise/1x1 convs, implemented with linear layers
206
+ self.act = get_tf_activation(config.hidden_act)
207
+ self.grn = TFConvNextV2GRN(config, 4 * dim, dtype=tf.float32, name="grn")
208
+ self.pwconv2 = keras.layers.Dense(
209
+ units=dim,
210
+ kernel_initializer=get_initializer(config.initializer_range),
211
+ bias_initializer=keras.initializers.Zeros(),
212
+ name="pwconv2",
213
+ )
214
+ # Using `layers.Activation` instead of `tf.identity` to better control `training`
215
+ # behaviour.
216
+ self.drop_path = (
217
+ TFConvNextV2DropPath(drop_path, name="drop_path")
218
+ if drop_path > 0.0
219
+ else keras.layers.Activation("linear", name="drop_path")
220
+ )
221
+
222
+ def call(self, hidden_states, training=False):
223
+ input = hidden_states
224
+ x = self.dwconv(hidden_states)
225
+ x = self.layernorm(x)
226
+ x = self.pwconv1(x)
227
+ x = self.act(x)
228
+ x = self.grn(x)
229
+ x = self.pwconv2(x)
230
+ x = self.drop_path(x, training=training)
231
+ x = input + x
232
+ return x
233
+
234
+ def build(self, input_shape=None):
235
+ if self.built:
236
+ return
237
+ self.built = True
238
+ if getattr(self, "dwconv", None) is not None:
239
+ with tf.name_scope(self.dwconv.name):
240
+ self.dwconv.build([None, None, None, self.dim])
241
+ if getattr(self, "layernorm", None) is not None:
242
+ with tf.name_scope(self.layernorm.name):
243
+ self.layernorm.build([None, None, None, self.dim])
244
+ if getattr(self, "pwconv1", None) is not None:
245
+ with tf.name_scope(self.pwconv1.name):
246
+ self.pwconv1.build([None, None, self.dim])
247
+ if getattr(self, "grn", None) is not None:
248
+ with tf.name_scope(self.grn.name):
249
+ self.grn.build(None)
250
+ if getattr(self, "pwconv2", None) is not None:
251
+ with tf.name_scope(self.pwconv2.name):
252
+ self.pwconv2.build([None, None, 4 * self.dim])
253
+ if getattr(self, "drop_path", None) is not None:
254
+ with tf.name_scope(self.drop_path.name):
255
+ self.drop_path.build(None)
256
+
257
+
258
+ # Copied from transformers.models.convnext.modeling_tf_convnext.TFConvNextStage with ConvNext->ConvNextV2
259
+ class TFConvNextV2Stage(keras.layers.Layer):
260
+ """ConvNextV2 stage, consisting of an optional downsampling layer + multiple residual blocks.
261
+
262
+ Args:
263
+ config (`ConvNextV2Config`):
264
+ Model configuration class.
265
+ in_channels (`int`):
266
+ Number of input channels.
267
+ out_channels (`int`):
268
+ Number of output channels.
269
+ depth (`int`):
270
+ Number of residual blocks.
271
+ drop_path_rates(`List[float]`):
272
+ Stochastic depth rates for each layer.
273
+ """
274
+
275
+ def __init__(
276
+ self,
277
+ config: ConvNextV2Config,
278
+ in_channels: int,
279
+ out_channels: int,
280
+ kernel_size: int = 2,
281
+ stride: int = 2,
282
+ depth: int = 2,
283
+ drop_path_rates: Optional[List[float]] = None,
284
+ **kwargs,
285
+ ):
286
+ super().__init__(**kwargs)
287
+ if in_channels != out_channels or stride > 1:
288
+ self.downsampling_layer = [
289
+ keras.layers.LayerNormalization(
290
+ epsilon=1e-6,
291
+ name="downsampling_layer.0",
292
+ ),
293
+ # Inputs to this layer will follow NHWC format since we
294
+ # transposed the inputs from NCHW to NHWC in the `TFConvNextV2Embeddings`
295
+ # layer. All the outputs throughout the model will be in NHWC
296
+ # from this point on until the output where we again change to
297
+ # NCHW.
298
+ keras.layers.Conv2D(
299
+ filters=out_channels,
300
+ kernel_size=kernel_size,
301
+ strides=stride,
302
+ kernel_initializer=get_initializer(config.initializer_range),
303
+ bias_initializer=keras.initializers.Zeros(),
304
+ name="downsampling_layer.1",
305
+ ),
306
+ ]
307
+ else:
308
+ self.downsampling_layer = [tf.identity]
309
+
310
+ drop_path_rates = drop_path_rates or [0.0] * depth
311
+ self.layers = [
312
+ TFConvNextV2Layer(
313
+ config,
314
+ dim=out_channels,
315
+ drop_path=drop_path_rates[j],
316
+ name=f"layers.{j}",
317
+ )
318
+ for j in range(depth)
319
+ ]
320
+ self.in_channels = in_channels
321
+ self.out_channels = out_channels
322
+ self.stride = stride
323
+
324
+ def call(self, hidden_states):
325
+ for layer in self.downsampling_layer:
326
+ hidden_states = layer(hidden_states)
327
+ for layer in self.layers:
328
+ hidden_states = layer(hidden_states)
329
+ return hidden_states
330
+
331
+ def build(self, input_shape=None):
332
+ if self.built:
333
+ return
334
+ self.built = True
335
+ if getattr(self, "layers", None) is not None:
336
+ for layer in self.layers:
337
+ with tf.name_scope(layer.name):
338
+ layer.build(None)
339
+ if self.in_channels != self.out_channels or self.stride > 1:
340
+ with tf.name_scope(self.downsampling_layer[0].name):
341
+ self.downsampling_layer[0].build([None, None, None, self.in_channels])
342
+ with tf.name_scope(self.downsampling_layer[1].name):
343
+ self.downsampling_layer[1].build([None, None, None, self.in_channels])
344
+
345
+
346
+ class TFConvNextV2Encoder(keras.layers.Layer):
347
+ def __init__(self, config: ConvNextV2Config, **kwargs):
348
+ super().__init__(**kwargs)
349
+ self.stages = []
350
+ drop_path_rates = tf.linspace(0.0, config.drop_path_rate, sum(config.depths))
351
+ drop_path_rates = tf.split(drop_path_rates, config.depths)
352
+ drop_path_rates = [x.numpy().tolist() for x in drop_path_rates]
353
+ prev_chs = config.hidden_sizes[0]
354
+ for i in range(config.num_stages):
355
+ out_chs = config.hidden_sizes[i]
356
+ stage = TFConvNextV2Stage(
357
+ config,
358
+ in_channels=prev_chs,
359
+ out_channels=out_chs,
360
+ stride=2 if i > 0 else 1,
361
+ depth=config.depths[i],
362
+ drop_path_rates=drop_path_rates[i],
363
+ name=f"stages.{i}",
364
+ )
365
+ self.stages.append(stage)
366
+ prev_chs = out_chs
367
+
368
+ def call(
369
+ self,
370
+ hidden_states: tf.Tensor,
371
+ output_hidden_states: Optional[bool] = False,
372
+ return_dict: Optional[bool] = True,
373
+ ) -> Union[Tuple, TFBaseModelOutputWithNoAttention]:
374
+ all_hidden_states = () if output_hidden_states else None
375
+
376
+ for i, layer_module in enumerate(self.stages):
377
+ if output_hidden_states:
378
+ all_hidden_states = all_hidden_states + (hidden_states,)
379
+
380
+ hidden_states = layer_module(hidden_states)
381
+
382
+ if output_hidden_states:
383
+ all_hidden_states = all_hidden_states + (hidden_states,)
384
+
385
+ if not return_dict:
386
+ return tuple(v for v in [hidden_states, all_hidden_states] if v is not None)
387
+
388
+ return TFBaseModelOutputWithNoAttention(last_hidden_state=hidden_states, hidden_states=all_hidden_states)
389
+
390
+ def build(self, input_shape=None):
391
+ for stage in self.stages:
392
+ with tf.name_scope(stage.name):
393
+ stage.build(None)
394
+
395
+
396
+ @keras_serializable
397
+ class TFConvNextV2MainLayer(keras.layers.Layer):
398
+ config_class = ConvNextV2Config
399
+
400
+ def __init__(self, config: ConvNextV2Config, **kwargs):
401
+ super().__init__(**kwargs)
402
+
403
+ self.config = config
404
+ self.embeddings = TFConvNextV2Embeddings(config, name="embeddings")
405
+ self.encoder = TFConvNextV2Encoder(config, name="encoder")
406
+ self.layernorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layernorm")
407
+ # We are setting the `data_format` like so because from here on we will revert to the
408
+ # NCHW output format
409
+ self.pooler = keras.layers.GlobalAvgPool2D(data_format="channels_last")
410
+
411
+ @unpack_inputs
412
+ def call(
413
+ self,
414
+ pixel_values: TFModelInputType | None = None,
415
+ output_hidden_states: Optional[bool] = None,
416
+ return_dict: Optional[bool] = None,
417
+ training: bool = False,
418
+ ) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]:
419
+ output_hidden_states = (
420
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
421
+ )
422
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
423
+
424
+ if pixel_values is None:
425
+ raise ValueError("You have to specify pixel_values")
426
+
427
+ embedding_output = self.embeddings(pixel_values, training=training)
428
+
429
+ encoder_outputs = self.encoder(
430
+ embedding_output,
431
+ output_hidden_states=output_hidden_states,
432
+ return_dict=return_dict,
433
+ training=training,
434
+ )
435
+
436
+ last_hidden_state = encoder_outputs[0]
437
+
438
+ # Change to NCHW output format to have uniformity in the modules
439
+ pooled_output = self.pooler(last_hidden_state)
440
+ last_hidden_state = tf.transpose(last_hidden_state, perm=(0, 3, 1, 2))
441
+ pooled_output = self.layernorm(pooled_output)
442
+
443
+ # Change the other hidden state outputs to NCHW as well
444
+ if output_hidden_states:
445
+ hidden_states = tuple([tf.transpose(h, perm=(0, 3, 1, 2)) for h in encoder_outputs[1]])
446
+
447
+ if not return_dict:
448
+ hidden_states = hidden_states if output_hidden_states else ()
449
+ return (last_hidden_state, pooled_output) + hidden_states
450
+
451
+ return TFBaseModelOutputWithPoolingAndNoAttention(
452
+ last_hidden_state=last_hidden_state,
453
+ pooler_output=pooled_output,
454
+ hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states,
455
+ )
456
+
457
+ def build(self, input_shape=None):
458
+ if self.built:
459
+ return
460
+ self.built = True
461
+ if getattr(self, "embeddings", None) is not None:
462
+ with tf.name_scope(self.embeddings.name):
463
+ self.embeddings.build(None)
464
+ if getattr(self, "encoder", None) is not None:
465
+ with tf.name_scope(self.encoder.name):
466
+ self.encoder.build(None)
467
+ if getattr(self, "layernorm", None) is not None:
468
+ with tf.name_scope(self.layernorm.name):
469
+ self.layernorm.build([None, self.config.hidden_sizes[-1]])
470
+
471
+
472
+ class TFConvNextV2PreTrainedModel(TFPreTrainedModel):
473
+ """
474
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
475
+ models.
476
+ """
477
+
478
+ config_class = ConvNextV2Config
479
+ base_model_prefix = "convnextv2"
480
+ main_input_name = "pixel_values"
481
+
482
+
483
+ CONVNEXTV2_START_DOCSTRING = r"""
484
+ This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
485
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
486
+ etc.)
487
+
488
+ This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
489
+ as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matters related to general usage and
490
+ behavior.
491
+
492
+ <Tip>
493
+
494
+ TensorFlow models and layers in `transformers` accept two formats as input:
495
+
496
+ - having all inputs as keyword arguments (like PyTorch models), or
497
+ - having all inputs as a list, tuple or dict in the first positional argument.
498
+
499
+ The reason the second format is supported is that Keras methods prefer this format when passing inputs to models
500
+ and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just
501
+ pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second
502
+ format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with
503
+ the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first
+ positional argument:
+
+ - a single Tensor with `pixel_values` only and nothing else: `model(pixel_values)`
+ - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
+ `model([pixel_values, attention_mask])` or `model([pixel_values, attention_mask, token_type_ids])`
+ - a dictionary with one or several input Tensors associated with the input names given in the docstring:
+ `model({"pixel_values": pixel_values, "token_type_ids": token_type_ids})`
+
+ Note that when creating models and layers with
+ [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/), you don't need to worry
+ about any of this, as you can just pass inputs like you would to any other Python function!
+
+ </Tip>
+
+ Parameters:
+     config ([`ConvNextV2Config`]): Model configuration class with all the parameters of the model.
+         Initializing with a config file does not load the weights associated with the model, only the
+         configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
+ """
+
+ CONVNEXTV2_INPUTS_DOCSTRING = r"""
+ Args:
+     pixel_values (`np.ndarray`, `tf.Tensor`, `List[tf.Tensor]`, `Dict[str, tf.Tensor]` or `Dict[str, np.ndarray]`, each example of shape `(batch_size, num_channels, height, width)`):
+         Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
+         [`ConvNextImageProcessor.__call__`] for details.
+
+     output_hidden_states (`bool`, *optional*):
+         Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+         more detail. This argument can be used only in eager mode; in graph mode the value in the config will be
+         used instead.
+     return_dict (`bool`, *optional*):
+         Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in
+         eager mode; in graph mode the value will always be set to `True`.
+ """
+
+
+ @add_start_docstrings(
+     "The bare ConvNextV2 model outputting raw features without any specific head on top.",
+     CONVNEXTV2_START_DOCSTRING,
+ )
+ class TFConvNextV2Model(TFConvNextV2PreTrainedModel):
+     def __init__(self, config: ConvNextV2Config, *inputs, **kwargs):
+         super().__init__(config, *inputs, **kwargs)
+         self.convnextv2 = TFConvNextV2MainLayer(config, name="convnextv2")
+
+     @unpack_inputs
+     @add_start_docstrings_to_model_forward(CONVNEXTV2_INPUTS_DOCSTRING)
+     @add_code_sample_docstrings(
+         checkpoint=_CHECKPOINT_FOR_DOC,
+         output_type=TFBaseModelOutputWithPoolingAndNoAttention,
+         config_class=_CONFIG_FOR_DOC,
+         modality="vision",
+         expected_output=_EXPECTED_OUTPUT_SHAPE,
+     )
+     def call(
+         self,
+         pixel_values: TFModelInputType | None = None,
+         output_hidden_states: Optional[bool] = None,
+         return_dict: Optional[bool] = None,
+         training: bool = False,
+     ) -> Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]:
+         output_hidden_states = (
+             output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+         )
+         return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+         if pixel_values is None:
+             raise ValueError("You have to specify pixel_values")
+
+         outputs = self.convnextv2(
+             pixel_values=pixel_values,
+             output_hidden_states=output_hidden_states,
+             return_dict=return_dict,
+             training=training,
+         )
+
+         if not return_dict:
+             return outputs[:]
+
+         return TFBaseModelOutputWithPoolingAndNoAttention(
+             last_hidden_state=outputs.last_hidden_state,
+             pooler_output=outputs.pooler_output,
+             hidden_states=outputs.hidden_states,
+         )
+
+     def build(self, input_shape=None):
+         if self.built:
+             return
+         self.built = True
+         if getattr(self, "convnextv2", None) is not None:
+             with tf.name_scope(self.convnextv2.name):
+                 self.convnextv2.build(None)
+
+
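As a concrete illustration of the three input styles described in `CONVNEXTV2_START_DOCSTRING`, the sketch below calls the bare model each way. It is not part of the diff; the checkpoint name and random input are assumptions for the example (ConvNextV2 takes only `pixel_values`, so the list and dict forms each carry a single entry).

```python
# Sketch only: exercises the three Functional-API input styles accepted by
# TFConvNextV2Model.call via @unpack_inputs. Checkpoint name and dummy input
# are illustrative assumptions, not part of this diff.
import tensorflow as tf
from transformers import TFConvNextV2Model

model = TFConvNextV2Model.from_pretrained("facebook/convnextv2-tiny-1k-224")
pixel_values = tf.random.uniform((1, 3, 224, 224))  # (batch, channels, height, width)

out = model(pixel_values)                     # a single Tensor
out = model([pixel_values])                   # a list, in docstring order
out = model({"pixel_values": pixel_values})   # a dict keyed by input name

print(out.last_hidden_state.shape, out.pooler_output.shape)
```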
+ @add_start_docstrings(
+     """
+     ConvNextV2 Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
+     ImageNet.
+     """,
+     CONVNEXTV2_START_DOCSTRING,
+ )
+ class TFConvNextV2ForImageClassification(TFConvNextV2PreTrainedModel, TFSequenceClassificationLoss):
+     def __init__(self, config: ConvNextV2Config, *inputs, **kwargs):
+         super().__init__(config, *inputs, **kwargs)
+
+         self.num_labels = config.num_labels
+         self.convnextv2 = TFConvNextV2MainLayer(config, name="convnextv2")
+
+         # Classifier head
+         self.classifier = keras.layers.Dense(
+             units=config.num_labels,
+             kernel_initializer=get_initializer(config.initializer_range),
+             bias_initializer=keras.initializers.Zeros(),
+             name="classifier",
+         )
+
+     @unpack_inputs
+     @add_start_docstrings_to_model_forward(CONVNEXTV2_INPUTS_DOCSTRING)
+     @add_code_sample_docstrings(
+         checkpoint=_IMAGE_CLASS_CHECKPOINT,
+         output_type=TFImageClassifierOutputWithNoAttention,
+         config_class=_CONFIG_FOR_DOC,
+         expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
+     )
+     def call(
+         self,
+         pixel_values: TFModelInputType | None = None,
+         output_hidden_states: Optional[bool] = None,
+         return_dict: Optional[bool] = None,
+         labels: np.ndarray | tf.Tensor | None = None,
+         training: Optional[bool] = False,
+     ) -> Union[TFImageClassifierOutputWithNoAttention, Tuple[tf.Tensor]]:
+         r"""
+         labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size,)`, *optional*):
+             Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
+             config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss); if
+             `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
+         """
+         output_hidden_states = (
+             output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+         )
+         return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+         if pixel_values is None:
+             raise ValueError("You have to specify pixel_values")
+
+         outputs = self.convnextv2(
+             pixel_values,
+             output_hidden_states=output_hidden_states,
+             return_dict=return_dict,
+             training=training,
+         )
+
+         pooled_output = outputs.pooler_output if return_dict else outputs[1]
+
+         logits = self.classifier(pooled_output)
+         loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)
+
+         if not return_dict:
+             output = (logits,) + outputs[2:]
+             return ((loss,) + output) if loss is not None else output
+
+         return TFImageClassifierOutputWithNoAttention(
+             loss=loss,
+             logits=logits,
+             hidden_states=outputs.hidden_states,
+         )
+
+     def build(self, input_shape=None):
+         if self.built:
+             return
+         self.built = True
+         if getattr(self, "convnextv2", None) is not None:
+             with tf.name_scope(self.convnextv2.name):
+                 self.convnextv2.build(None)
+         if getattr(self, "classifier", None) is not None:
+             with tf.name_scope(self.classifier.name):
+                 self.classifier.build([None, None, self.config.hidden_sizes[-1]])
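A matching sketch for the classification head, showing the `labels` branch (cross-entropy here, since ImageNet checkpoints have `num_labels > 1`) and the plain-tuple output when `return_dict=False`. Checkpoint and dummy data are again assumptions, not part of the diff.

```python
# Sketch only: loss/logits flow of TFConvNextV2ForImageClassification.
import tensorflow as tf
from transformers import TFConvNextV2ForImageClassification

model = TFConvNextV2ForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224")
pixel_values = tf.random.uniform((2, 3, 224, 224))
labels = tf.constant([1, 42])  # indices in [0, config.num_labels - 1]

outputs = model(pixel_values, labels=labels)
print(outputs.loss, outputs.logits.shape)  # cross-entropy loss, logits (2, num_labels)

# With return_dict=False the result is a plain tuple: (loss, logits, ...)
loss, logits = model(pixel_values, labels=labels, return_dict=False)[:2]
```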
venv/lib/python3.10/site-packages/transformers/models/fastspeech2_conformer/tokenization_fastspeech2_conformer.py ADDED
@@ -0,0 +1,184 @@
+ # coding=utf-8
+ # Copyright 2023 The HuggingFace Team and The HuggingFace Inc. team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """Tokenization classes for FastSpeech2Conformer."""
+ import json
+ import os
+ from typing import Optional, Tuple
+
+ import regex
+
+ from ...tokenization_utils import PreTrainedTokenizer
+ from ...utils import logging, requires_backends
+
+
+ logger = logging.get_logger(__name__)
+
+ VOCAB_FILES_NAMES = {"vocab_file": "vocab.json"}
+
+
+ class FastSpeech2ConformerTokenizer(PreTrainedTokenizer):
+     """
+     Construct a FastSpeech2Conformer tokenizer.
+
+     Args:
+         vocab_file (`str`):
+             Path to the vocabulary file.
+         bos_token (`str`, *optional*, defaults to `"<sos/eos>"`):
+             The beginning-of-sequence token. Note that for FastSpeech2, it is the same as the `eos_token`.
+         eos_token (`str`, *optional*, defaults to `"<sos/eos>"`):
+             The end-of-sequence token. Note that for FastSpeech2, it is the same as the `bos_token`.
+         pad_token (`str`, *optional*, defaults to `"<blank>"`):
+             The token used for padding, for example when batching sequences of different lengths.
+         unk_token (`str`, *optional*, defaults to `"<unk>"`):
+             The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be
+             this token instead.
+         should_strip_spaces (`bool`, *optional*, defaults to `False`):
+             Whether or not to strip the spaces from the list of tokens.
+     """
+
+     vocab_files_names = VOCAB_FILES_NAMES
+     model_input_names = ["input_ids", "attention_mask"]
+
+     def __init__(
+         self,
+         vocab_file,
+         bos_token="<sos/eos>",
+         eos_token="<sos/eos>",
+         pad_token="<blank>",
+         unk_token="<unk>",
+         should_strip_spaces=False,
+         **kwargs,
+     ):
+         requires_backends(self, "g2p_en")
+
+         with open(vocab_file, encoding="utf-8") as vocab_handle:
+             self.encoder = json.load(vocab_handle)
+
+         import g2p_en
+
+         self.g2p = g2p_en.G2p()
+
+         self.decoder = {v: k for k, v in self.encoder.items()}
+
+         super().__init__(
+             bos_token=bos_token,
+             eos_token=eos_token,
+             unk_token=unk_token,
+             pad_token=pad_token,
+             should_strip_spaces=should_strip_spaces,
+             **kwargs,
+         )
+
+         self.should_strip_spaces = should_strip_spaces
+
+     @property
+     def vocab_size(self):
+         return len(self.decoder)
+
+     def get_vocab(self):
+         """Returns vocab as a dict."""
+         return dict(self.encoder, **self.added_tokens_encoder)
+
+     def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
+         # expand symbols
+         text = regex.sub(";", ",", text)
+         text = regex.sub(":", ",", text)
+         text = regex.sub("-", " ", text)
+         text = regex.sub("&", "and", text)
+
+         # strip unnecessary symbols
+         text = regex.sub(r"[\(\)\[\]\<\>\"]+", "", text)
+
+         # strip whitespaces
+         text = regex.sub(r"\s+", " ", text)
+
+         text = text.upper()
+
+         return text, kwargs
+
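To make the normalization above concrete, here is a small standalone sketch that applies the same substitutions to a made-up sentence (the input string is invented for illustration):

```python
# Illustrative only: the same normalization steps as prepare_for_tokenization.
import regex

text = "Hello -- world; (see) [notes] & more:   done"
for pattern, repl in [(";", ","), (":", ","), ("-", " "), ("&", "and")]:
    text = regex.sub(pattern, repl, text)         # expand symbols
text = regex.sub(r"[\(\)\[\]\<\>\"]+", "", text)  # strip unnecessary symbols
text = regex.sub(r"\s+", " ", text)               # collapse whitespace
print(text.upper())  # HELLO WORLD, SEE NOTES AND MORE, DONE
```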
+     def _tokenize(self, text):
+         """Returns a tokenized string."""
+         # phonemize
+         tokens = self.g2p(text)
+
+         if self.should_strip_spaces:
+             tokens = list(filter(lambda s: s != " ", tokens))
+
+         tokens.append(self.eos_token)
+
+         return tokens
+
+     def _convert_token_to_id(self, token):
+         """Converts a token (str) into an id using the vocab."""
+         return self.encoder.get(token, self.encoder.get(self.unk_token))
+
+     def _convert_id_to_token(self, index):
+         """Converts an index (integer) into a token (str) using the vocab."""
+         return self.decoder.get(index, self.unk_token)
+
+     # Override since phonemes cannot be converted back to strings
+     def decode(self, token_ids, **kwargs):
+         logger.warning(
+             "Phonemes cannot be reliably converted to a string due to the one-to-many mapping, converting to tokens instead."
+         )
+         return self.convert_ids_to_tokens(token_ids)
+
+     # Override since phonemes cannot be converted back to strings
+     def convert_tokens_to_string(self, tokens, **kwargs):
+         logger.warning(
+             "Phonemes cannot be reliably converted to a string due to the one-to-many mapping, returning the tokens."
+         )
+         return tokens
+
+     def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
+         """
+         Save the vocabulary and special tokens file to a directory.
+
+         Args:
+             save_directory (`str`):
+                 The directory in which to save the vocabulary.
+
+         Returns:
+             `Tuple[str]`: Paths to the files saved.
+         """
+         if not os.path.isdir(save_directory):
+             logger.error(f"Vocabulary path ({save_directory}) should be a directory")
+             return
+         vocab_file = os.path.join(
+             save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
+         )
+
+         with open(vocab_file, "w", encoding="utf-8") as f:
+             f.write(json.dumps(self.get_vocab(), ensure_ascii=False))
+
+         return (vocab_file,)
+
+     def __getstate__(self):
+         state = self.__dict__.copy()
+         state["g2p"] = None
+         return state
+
+     def __setstate__(self, d):
+         self.__dict__ = d
+
+         try:
+             import g2p_en
+
+             self.g2p = g2p_en.G2p()
+         except ImportError:
+             raise ImportError(
+                 "You need to install g2p-en to use FastSpeech2ConformerTokenizer. "
+                 "See https://pypi.org/project/g2p-en/ for installation."
+             )
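Putting the tokenizer together, a hedged end-to-end sketch: it assumes `g2p-en` is installed and that the `espnet/fastspeech2_conformer` checkpoint ships a compatible `vocab.json` (an assumption here). Note that `decode` deliberately returns phoneme tokens rather than text, and that pickling works because `__getstate__`/`__setstate__` drop and rebuild the unpicklable `g2p_en.G2p` backend.

```python
# Sketch only: phoneme tokenization round trip.
import pickle
from transformers import FastSpeech2ConformerTokenizer

tokenizer = FastSpeech2ConformerTokenizer.from_pretrained("espnet/fastspeech2_conformer")
ids = tokenizer("Hello world")["input_ids"]

# decode() is overridden: the phoneme -> text mapping is one-to-many, so it
# logs a warning and returns the phoneme tokens instead of a string.
print(tokenizer.decode(ids))

# The g2p backend is set to None in __getstate__ and rebuilt in __setstate__,
# so the tokenizer can be pickled and restored.
restored = pickle.loads(pickle.dumps(tokenizer))
```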
venv/lib/python3.10/site-packages/transformers/models/mobilenet_v1/__pycache__/convert_original_tf_checkpoint_to_pytorch.cpython-310.pyc ADDED
Binary file (3.87 kB).

venv/lib/python3.10/site-packages/transformers/models/mobilenet_v1/__pycache__/modeling_mobilenet_v1.cpython-310.pyc ADDED
Binary file (13 kB).

venv/lib/python3.10/site-packages/transformers/models/prophetnet/__init__.py ADDED
@@ -0,0 +1,65 @@
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ from typing import TYPE_CHECKING
+
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
+
+
+ _import_structure = {
+     "configuration_prophetnet": ["PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "ProphetNetConfig"],
+     "tokenization_prophetnet": ["ProphetNetTokenizer"],
+ }
+
+ try:
+     if not is_torch_available():
+         raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+     pass
+ else:
+     _import_structure["modeling_prophetnet"] = [
+         "PROPHETNET_PRETRAINED_MODEL_ARCHIVE_LIST",
+         "ProphetNetDecoder",
+         "ProphetNetEncoder",
+         "ProphetNetForCausalLM",
+         "ProphetNetForConditionalGeneration",
+         "ProphetNetModel",
+         "ProphetNetPreTrainedModel",
+     ]
+
+
+ if TYPE_CHECKING:
+     from .configuration_prophetnet import PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ProphetNetConfig
+     from .tokenization_prophetnet import ProphetNetTokenizer
+
+     try:
+         if not is_torch_available():
+             raise OptionalDependencyNotAvailable()
+     except OptionalDependencyNotAvailable:
+         pass
+     else:
+         from .modeling_prophetnet import (
+             PROPHETNET_PRETRAINED_MODEL_ARCHIVE_LIST,
+             ProphetNetDecoder,
+             ProphetNetEncoder,
+             ProphetNetForCausalLM,
+             ProphetNetForConditionalGeneration,
+             ProphetNetModel,
+             ProphetNetPreTrainedModel,
+         )
+
+ else:
+     import sys
+
+     sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
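This `__init__.py` follows the library's standard `_LazyModule` pattern: the package advertises its public names in `_import_structure`, the heavy submodules are imported only on first attribute access, and torch-only names are registered solely when `is_torch_available()` is true. A small sketch of what that means at import time (illustrative, not part of the diff):

```python
# Importing the package is cheap: only the lazy shim module is created.
import transformers.models.prophetnet as prophetnet

# First attribute access triggers the real import of configuration_prophetnet.
config = prophetnet.ProphetNetConfig()

# Torch-backed classes are registered only when is_torch_available() is True;
# without torch they are absent from _import_structure, so this lookup fails
# instead of importing the modeling code.
model_cls = prophetnet.ProphetNetForConditionalGeneration
```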
venv/lib/python3.10/site-packages/transformers/models/prophetnet/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.13 kB).

venv/lib/python3.10/site-packages/transformers/models/prophetnet/__pycache__/configuration_prophetnet.cpython-310.pyc ADDED
Binary file (7.86 kB).

venv/lib/python3.10/site-packages/transformers/models/prophetnet/__pycache__/convert_prophetnet_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc ADDED
Binary file (3.72 kB).