ZhangYunchenY committed
Commit 426b5ab · 1 Parent(s): 32703c7

[Model] bart-base-xsum

config.json ADDED
@@ -0,0 +1,76 @@
+ {
+   "_name_or_path": "/mnt/lustre/zhangyunchen/transformers/bart-base",
+   "activation_dropout": 0.1,
+   "activation_function": "gelu",
+   "add_bias_logits": false,
+   "add_final_layer_norm": false,
+   "architectures": [
+     "BartForConditionalGeneration"
+   ],
+   "attention_dropout": 0.1,
+   "bos_token_id": 0,
+   "classif_dropout": 0.1,
+   "classifier_dropout": 0.0,
+   "d_model": 768,
+   "decoder_attention_heads": 12,
+   "decoder_ffn_dim": 3072,
+   "decoder_layerdrop": 0.0,
+   "decoder_layers": 6,
+   "decoder_start_token_id": 2,
+   "dropout": 0.1,
+   "early_stopping": true,
+   "encoder_attention_heads": 12,
+   "encoder_ffn_dim": 3072,
+   "encoder_layerdrop": 0.0,
+   "encoder_layers": 6,
+   "eos_token_id": 2,
+   "forced_eos_token_id": 2,
+   "gradient_checkpointing": false,
+   "id2label": {
+     "0": "LABEL_0",
+     "1": "LABEL_1",
+     "2": "LABEL_2"
+   },
+   "init_std": 0.02,
+   "is_encoder_decoder": true,
+   "label2id": {
+     "LABEL_0": 0,
+     "LABEL_1": 1,
+     "LABEL_2": 2
+   },
+   "max_length": 62,
+   "max_position_embeddings": 1024,
+   "min_length": 11,
+   "model_type": "bart",
+   "no_repeat_ngram_size": 3,
+   "normalize_before": false,
+   "normalize_embedding": true,
+   "num_beams": 6,
+   "num_hidden_layers": 6,
+   "pad_token_id": 1,
+   "scale_embedding": false,
+   "task_specific_params": {
+     "summarization": {
+       "length_penalty": 1.0,
+       "max_length": 128,
+       "min_length": 12,
+       "num_beams": 4
+     },
+     "summarization_cnn": {
+       "length_penalty": 2.0,
+       "max_length": 142,
+       "min_length": 56,
+       "num_beams": 4
+     },
+     "summarization_xsum": {
+       "length_penalty": 1.0,
+       "max_length": 62,
+       "min_length": 11,
+       "num_beams": 6
+     }
+   },
+   "torch_dtype": "float32",
+   "transformers_version": "4.16.2",
+   "use_cache": true,
+   "vocab_size": 50265
+ }
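The config above is a standard bart-base layout (6 encoder and 6 decoder layers, d_model 768) with XSum-style generation defaults baked in (num_beams 6, min_length 11, max_length 62). A minimal usage sketch with the transformers library is shown below; the local path "./bart-base-xsum" is a placeholder for wherever this repository is checked out, not a name taken from the commit.

# Minimal sketch: load this checkpoint and summarize one article.
from transformers import BartForConditionalGeneration, BartTokenizer

model = BartForConditionalGeneration.from_pretrained("./bart-base-xsum")  # placeholder path
tokenizer = BartTokenizer.from_pretrained("./bart-base-xsum")

article = "The full text of a news article to be summarized goes here."
inputs = tokenizer(article, return_tensors="pt", truncation=True, max_length=1024)

# generate() falls back to the values stored in config.json, so beam search
# with num_beams=6 and a 11..62 token summary length is used by default.
summary_ids = model.generate(**inputs)
print(tokenizer.decode(summary_ids[0], skip_special_tokens=True))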
merges.txt ADDED
optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0a196d5ba128b5eb44bc2e6151bcb783ca392b1bdd2168ed3f4f4c4a99b42dad
+ size 1115527467
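optimizer.pt (like the other large binaries below) is stored through Git LFS, so the committed text is only a pointer holding the SHA-256 and byte size of the real object. A small sketch of checking a downloaded file against such a pointer, using only the standard library; the file names are placeholders for actual local paths.

# Sketch: verify a downloaded blob against its git-lfs pointer (oid + size).
import hashlib, os

def read_pointer(pointer_path):
    # Pointer files are "key value" lines: version, oid sha256:<hash>, size <bytes>.
    fields = dict(line.split(" ", 1) for line in open(pointer_path) if " " in line)
    return fields["oid"].strip().split(":", 1)[1], int(fields["size"])

def verify(pointer_path, blob_path):
    expected_oid, expected_size = read_pointer(pointer_path)
    h = hashlib.sha256()
    with open(blob_path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
    return h.hexdigest() == expected_oid and os.path.getsize(blob_path) == expected_size

print(verify("optimizer.pt.pointer", "optimizer.pt"))  # placeholder paths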
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a0b86faab03570f1fc461375c131869c09823b9cca68b1e43956547b5c623ad7
+ size 557986199
rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d80e5d4701cb1988acad5c269e31fefd56007b39b1a0ae15ae75d78e08de92df
+ size 14595
scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7a4d52ca32d90c332eea073c953c4ddc1ae9cfc78a8e90e95dedece59d83b3b2
+ size 623
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "sep_token": "</s>", "pad_token": "<pad>", "cls_token": "<s>", "mask_token": {"content": "<mask>", "single_word": false, "lstrip": true, "rstrip": false, "normalized": false}}
tokenizer.json ADDED
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>", "add_prefix_space": false, "errors": "replace", "sep_token": "</s>", "cls_token": "<s>", "pad_token": "<pad>", "mask_token": "<mask>", "trim_offsets": true, "special_tokens_map_file": null, "name_or_path": "/mnt/lustre/zhangyunchen/transformers/bart-base", "tokenizer_class": "BartTokenizer"}
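tokenizer_config.json and special_tokens_map.json describe a standard BartTokenizer (byte-level BPE backed by vocab.json/merges.txt, with <s>, </s>, <pad>, <unk> and <mask> specials and add_prefix_space false). A short sketch, again assuming the repository is available at a placeholder local path:

# Sketch: the special tokens declared above are exposed as tokenizer attributes.
from transformers import BartTokenizer

tokenizer = BartTokenizer.from_pretrained("./bart-base-xsum")  # placeholder path
print(tokenizer.bos_token, tokenizer.eos_token, tokenizer.pad_token, tokenizer.mask_token)
print(tokenizer("Hello world").input_ids)  # ids are wrapped with bos (0) and eos (2), matching config.json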
trainer_state.json ADDED
@@ -0,0 +1,1080 @@
1
+ {
2
+ "best_metric": null,
3
+ "best_model_checkpoint": null,
4
+ "epoch": 11.91962675448914,
5
+ "global_step": 76000,
6
+ "is_hyper_param_search": false,
7
+ "is_local_process_zero": true,
8
+ "is_world_process_zero": true,
9
+ "log_history": [
10
+ {
11
+ "epoch": 0.16,
12
+ "learning_rate": 8.712319219376199e-06,
13
+ "loss": 3.9787,
14
+ "step": 1000
15
+ },
16
+ {
17
+ "epoch": 0.16,
18
+ "eval_loss": 3.446138381958008,
19
+ "eval_runtime": 172.8011,
20
+ "eval_samples_per_second": 65.578,
21
+ "eval_steps_per_second": 16.395,
22
+ "step": 1000
23
+ },
24
+ {
25
+ "epoch": 0.31,
26
+ "learning_rate": 1.7424638438752397e-05,
27
+ "loss": 3.6486,
28
+ "step": 2000
29
+ },
30
+ {
31
+ "epoch": 0.31,
32
+ "eval_loss": 3.351425886154175,
33
+ "eval_runtime": 164.1827,
34
+ "eval_samples_per_second": 69.021,
35
+ "eval_steps_per_second": 17.255,
36
+ "step": 2000
37
+ },
38
+ {
39
+ "epoch": 0.47,
40
+ "learning_rate": 2.6136957658128598e-05,
41
+ "loss": 3.5509,
42
+ "step": 3000
43
+ },
44
+ {
45
+ "epoch": 0.47,
46
+ "eval_loss": 3.2961513996124268,
47
+ "eval_runtime": 164.8921,
48
+ "eval_samples_per_second": 68.724,
49
+ "eval_steps_per_second": 17.181,
50
+ "step": 3000
51
+ },
52
+ {
53
+ "epoch": 0.63,
54
+ "learning_rate": 3.4849276877504794e-05,
55
+ "loss": 3.488,
56
+ "step": 4000
57
+ },
58
+ {
59
+ "epoch": 0.63,
60
+ "eval_loss": 3.2645070552825928,
61
+ "eval_runtime": 175.5668,
62
+ "eval_samples_per_second": 64.545,
63
+ "eval_steps_per_second": 16.136,
64
+ "step": 4000
65
+ },
66
+ {
67
+ "epoch": 0.78,
68
+ "learning_rate": 4.356159609688099e-05,
69
+ "loss": 3.4485,
70
+ "step": 5000
71
+ },
72
+ {
73
+ "epoch": 0.78,
74
+ "eval_loss": 3.236461877822876,
75
+ "eval_runtime": 172.1625,
76
+ "eval_samples_per_second": 65.822,
77
+ "eval_steps_per_second": 16.455,
78
+ "step": 5000
79
+ },
80
+ {
81
+ "epoch": 0.94,
82
+ "learning_rate": 4.985484032435679e-05,
83
+ "loss": 3.4183,
84
+ "step": 6000
85
+ },
86
+ {
87
+ "epoch": 0.94,
88
+ "eval_loss": 3.213158130645752,
89
+ "eval_runtime": 165.7563,
90
+ "eval_samples_per_second": 68.365,
91
+ "eval_steps_per_second": 17.091,
92
+ "step": 6000
93
+ },
94
+ {
95
+ "epoch": 1.1,
96
+ "learning_rate": 4.9298672984727646e-05,
97
+ "loss": 3.3437,
98
+ "step": 7000
99
+ },
100
+ {
101
+ "epoch": 1.1,
102
+ "eval_loss": 3.199575185775757,
103
+ "eval_runtime": 155.1508,
104
+ "eval_samples_per_second": 73.039,
105
+ "eval_steps_per_second": 18.26,
106
+ "step": 7000
107
+ },
108
+ {
109
+ "epoch": 1.25,
110
+ "learning_rate": 4.87425056450985e-05,
111
+ "loss": 3.2964,
112
+ "step": 8000
113
+ },
114
+ {
115
+ "epoch": 1.25,
116
+ "eval_loss": 3.1807236671447754,
117
+ "eval_runtime": 156.1445,
118
+ "eval_samples_per_second": 72.574,
119
+ "eval_steps_per_second": 18.143,
120
+ "step": 8000
121
+ },
122
+ {
123
+ "epoch": 1.41,
124
+ "learning_rate": 4.8186338305469355e-05,
125
+ "loss": 3.2813,
126
+ "step": 9000
127
+ },
128
+ {
129
+ "epoch": 1.41,
130
+ "eval_loss": 3.1617496013641357,
131
+ "eval_runtime": 169.4771,
132
+ "eval_samples_per_second": 66.864,
133
+ "eval_steps_per_second": 16.716,
134
+ "step": 9000
135
+ },
136
+ {
137
+ "epoch": 1.57,
138
+ "learning_rate": 4.76301709658402e-05,
139
+ "loss": 3.2586,
140
+ "step": 10000
141
+ },
142
+ {
143
+ "epoch": 1.57,
144
+ "eval_loss": 3.14561128616333,
145
+ "eval_runtime": 169.05,
146
+ "eval_samples_per_second": 67.033,
147
+ "eval_steps_per_second": 16.758,
148
+ "step": 10000
149
+ },
150
+ {
151
+ "epoch": 1.73,
152
+ "learning_rate": 4.7074003626211057e-05,
153
+ "loss": 3.248,
154
+ "step": 11000
155
+ },
156
+ {
157
+ "epoch": 1.73,
158
+ "eval_loss": 3.1361846923828125,
159
+ "eval_runtime": 165.9904,
160
+ "eval_samples_per_second": 68.269,
161
+ "eval_steps_per_second": 17.067,
162
+ "step": 11000
163
+ },
164
+ {
165
+ "epoch": 1.88,
166
+ "learning_rate": 4.651783628658191e-05,
167
+ "loss": 3.2413,
168
+ "step": 12000
169
+ },
170
+ {
171
+ "epoch": 1.88,
172
+ "eval_loss": 3.1259961128234863,
173
+ "eval_runtime": 167.4544,
174
+ "eval_samples_per_second": 67.672,
175
+ "eval_steps_per_second": 16.918,
176
+ "step": 12000
177
+ },
178
+ {
179
+ "epoch": 2.04,
180
+ "learning_rate": 4.5961668946952765e-05,
181
+ "loss": 3.1974,
182
+ "step": 13000
183
+ },
184
+ {
185
+ "epoch": 2.04,
186
+ "eval_loss": 3.121722459793091,
187
+ "eval_runtime": 167.3385,
188
+ "eval_samples_per_second": 67.719,
189
+ "eval_steps_per_second": 16.93,
190
+ "step": 13000
191
+ },
192
+ {
193
+ "epoch": 2.2,
194
+ "learning_rate": 4.540550160732361e-05,
195
+ "loss": 3.1014,
196
+ "step": 14000
197
+ },
198
+ {
199
+ "epoch": 2.2,
200
+ "eval_loss": 3.1170709133148193,
201
+ "eval_runtime": 166.2081,
202
+ "eval_samples_per_second": 68.18,
203
+ "eval_steps_per_second": 17.045,
204
+ "step": 14000
205
+ },
206
+ {
207
+ "epoch": 2.35,
208
+ "learning_rate": 4.484933426769447e-05,
209
+ "loss": 3.1037,
210
+ "step": 15000
211
+ },
212
+ {
213
+ "epoch": 2.35,
214
+ "eval_loss": 3.1060075759887695,
215
+ "eval_runtime": 163.3667,
216
+ "eval_samples_per_second": 69.365,
217
+ "eval_steps_per_second": 17.341,
218
+ "step": 15000
219
+ },
220
+ {
221
+ "epoch": 2.51,
222
+ "learning_rate": 4.4293166928065315e-05,
223
+ "loss": 3.1047,
224
+ "step": 16000
225
+ },
226
+ {
227
+ "epoch": 2.51,
228
+ "eval_loss": 3.1029844284057617,
229
+ "eval_runtime": 166.2417,
230
+ "eval_samples_per_second": 68.166,
231
+ "eval_steps_per_second": 17.041,
232
+ "step": 16000
233
+ },
234
+ {
235
+ "epoch": 2.67,
236
+ "learning_rate": 4.3736999588436176e-05,
237
+ "loss": 3.1004,
238
+ "step": 17000
239
+ },
240
+ {
241
+ "epoch": 2.67,
242
+ "eval_loss": 3.0984201431274414,
243
+ "eval_runtime": 156.2961,
244
+ "eval_samples_per_second": 72.503,
245
+ "eval_steps_per_second": 18.126,
246
+ "step": 17000
247
+ },
248
+ {
249
+ "epoch": 2.82,
250
+ "learning_rate": 4.3180832248807023e-05,
251
+ "loss": 3.0932,
252
+ "step": 18000
253
+ },
254
+ {
255
+ "epoch": 2.82,
256
+ "eval_loss": 3.0867185592651367,
257
+ "eval_runtime": 163.2041,
258
+ "eval_samples_per_second": 69.435,
259
+ "eval_steps_per_second": 17.359,
260
+ "step": 18000
261
+ },
262
+ {
263
+ "epoch": 2.98,
264
+ "learning_rate": 4.262466490917788e-05,
265
+ "loss": 3.0966,
266
+ "step": 19000
267
+ },
268
+ {
269
+ "epoch": 2.98,
270
+ "eval_loss": 3.0811350345611572,
271
+ "eval_runtime": 165.6977,
272
+ "eval_samples_per_second": 68.39,
273
+ "eval_steps_per_second": 17.097,
274
+ "step": 19000
275
+ },
276
+ {
277
+ "epoch": 3.14,
278
+ "learning_rate": 4.2068497569548725e-05,
279
+ "loss": 2.9921,
280
+ "step": 20000
281
+ },
282
+ {
283
+ "epoch": 3.14,
284
+ "eval_loss": 3.089102029800415,
285
+ "eval_runtime": 158.578,
286
+ "eval_samples_per_second": 71.46,
287
+ "eval_steps_per_second": 17.865,
288
+ "step": 20000
289
+ },
290
+ {
291
+ "epoch": 3.29,
292
+ "learning_rate": 4.151233022991958e-05,
293
+ "loss": 2.9753,
294
+ "step": 21000
295
+ },
296
+ {
297
+ "epoch": 3.29,
298
+ "eval_loss": 3.0833535194396973,
299
+ "eval_runtime": 156.4016,
300
+ "eval_samples_per_second": 72.454,
301
+ "eval_steps_per_second": 18.114,
302
+ "step": 21000
303
+ },
304
+ {
305
+ "epoch": 3.45,
306
+ "learning_rate": 4.0956162890290434e-05,
307
+ "loss": 2.9872,
308
+ "step": 22000
309
+ },
310
+ {
311
+ "epoch": 3.45,
312
+ "eval_loss": 3.0813159942626953,
313
+ "eval_runtime": 154.7129,
314
+ "eval_samples_per_second": 73.245,
315
+ "eval_steps_per_second": 18.311,
316
+ "step": 22000
317
+ },
318
+ {
319
+ "epoch": 3.61,
320
+ "learning_rate": 4.039999555066128e-05,
321
+ "loss": 2.9916,
322
+ "step": 23000
323
+ },
324
+ {
325
+ "epoch": 3.61,
326
+ "eval_loss": 3.069411516189575,
327
+ "eval_runtime": 157.7992,
328
+ "eval_samples_per_second": 71.813,
329
+ "eval_steps_per_second": 17.953,
330
+ "step": 23000
331
+ },
332
+ {
333
+ "epoch": 3.76,
334
+ "learning_rate": 3.9843828211032136e-05,
335
+ "loss": 2.9885,
336
+ "step": 24000
337
+ },
338
+ {
339
+ "epoch": 3.76,
340
+ "eval_loss": 3.0638270378112793,
341
+ "eval_runtime": 155.9919,
342
+ "eval_samples_per_second": 72.645,
343
+ "eval_steps_per_second": 18.161,
344
+ "step": 24000
345
+ },
346
+ {
347
+ "epoch": 3.92,
348
+ "learning_rate": 3.928766087140299e-05,
349
+ "loss": 2.9925,
350
+ "step": 25000
351
+ },
352
+ {
353
+ "epoch": 3.92,
354
+ "eval_loss": 3.0645134449005127,
355
+ "eval_runtime": 154.9825,
356
+ "eval_samples_per_second": 73.118,
357
+ "eval_steps_per_second": 18.279,
358
+ "step": 25000
359
+ },
360
+ {
361
+ "epoch": 4.08,
362
+ "learning_rate": 3.8731493531773845e-05,
363
+ "loss": 2.9338,
364
+ "step": 26000
365
+ },
366
+ {
367
+ "epoch": 4.08,
368
+ "eval_loss": 3.072267770767212,
369
+ "eval_runtime": 160.362,
370
+ "eval_samples_per_second": 70.665,
371
+ "eval_steps_per_second": 17.666,
372
+ "step": 26000
373
+ },
374
+ {
375
+ "epoch": 4.23,
376
+ "learning_rate": 3.817532619214469e-05,
377
+ "loss": 2.8808,
378
+ "step": 27000
379
+ },
380
+ {
381
+ "epoch": 4.23,
382
+ "eval_loss": 3.0753729343414307,
383
+ "eval_runtime": 154.306,
384
+ "eval_samples_per_second": 73.439,
385
+ "eval_steps_per_second": 18.36,
386
+ "step": 27000
387
+ },
388
+ {
389
+ "epoch": 4.39,
390
+ "learning_rate": 3.7619158852515547e-05,
391
+ "loss": 2.8947,
392
+ "step": 28000
393
+ },
394
+ {
395
+ "epoch": 4.39,
396
+ "eval_loss": 3.0727920532226562,
397
+ "eval_runtime": 155.5666,
398
+ "eval_samples_per_second": 72.843,
399
+ "eval_steps_per_second": 18.211,
400
+ "step": 28000
401
+ },
402
+ {
403
+ "epoch": 4.55,
404
+ "learning_rate": 3.7062991512886394e-05,
405
+ "loss": 2.8972,
406
+ "step": 29000
407
+ },
408
+ {
409
+ "epoch": 4.55,
410
+ "eval_loss": 3.0629560947418213,
411
+ "eval_runtime": 155.1806,
412
+ "eval_samples_per_second": 73.025,
413
+ "eval_steps_per_second": 18.256,
414
+ "step": 29000
415
+ },
416
+ {
417
+ "epoch": 4.71,
418
+ "learning_rate": 3.6506824173257255e-05,
419
+ "loss": 2.8965,
420
+ "step": 30000
421
+ },
422
+ {
423
+ "epoch": 4.71,
424
+ "eval_loss": 3.063514471054077,
425
+ "eval_runtime": 154.5456,
426
+ "eval_samples_per_second": 73.325,
427
+ "eval_steps_per_second": 18.331,
428
+ "step": 30000
429
+ },
430
+ {
431
+ "epoch": 4.86,
432
+ "learning_rate": 3.59506568336281e-05,
433
+ "loss": 2.9067,
434
+ "step": 31000
435
+ },
436
+ {
437
+ "epoch": 4.86,
438
+ "eval_loss": 3.055640697479248,
439
+ "eval_runtime": 154.9311,
440
+ "eval_samples_per_second": 73.142,
441
+ "eval_steps_per_second": 18.286,
442
+ "step": 31000
443
+ },
444
+ {
445
+ "epoch": 5.02,
446
+ "learning_rate": 3.539448949399896e-05,
447
+ "loss": 2.8863,
448
+ "step": 32000
449
+ },
450
+ {
451
+ "epoch": 5.02,
452
+ "eval_loss": 3.0722622871398926,
453
+ "eval_runtime": 154.3548,
454
+ "eval_samples_per_second": 73.415,
455
+ "eval_steps_per_second": 18.354,
456
+ "step": 32000
457
+ },
458
+ {
459
+ "epoch": 5.18,
460
+ "learning_rate": 3.4838322154369805e-05,
461
+ "loss": 2.8005,
462
+ "step": 33000
463
+ },
464
+ {
465
+ "epoch": 5.18,
466
+ "eval_loss": 3.072225332260132,
467
+ "eval_runtime": 168.3304,
468
+ "eval_samples_per_second": 67.32,
469
+ "eval_steps_per_second": 16.83,
470
+ "step": 33000
471
+ },
472
+ {
473
+ "epoch": 5.33,
474
+ "learning_rate": 3.4282154814740666e-05,
475
+ "loss": 2.8136,
476
+ "step": 34000
477
+ },
478
+ {
479
+ "epoch": 5.33,
480
+ "eval_loss": 3.067979097366333,
481
+ "eval_runtime": 164.2522,
482
+ "eval_samples_per_second": 68.991,
483
+ "eval_steps_per_second": 17.248,
484
+ "step": 34000
485
+ },
486
+ {
487
+ "epoch": 5.49,
488
+ "learning_rate": 3.3725987475111513e-05,
489
+ "loss": 2.8092,
490
+ "step": 35000
491
+ },
492
+ {
493
+ "epoch": 5.49,
494
+ "eval_loss": 3.0639047622680664,
495
+ "eval_runtime": 167.1507,
496
+ "eval_samples_per_second": 67.795,
497
+ "eval_steps_per_second": 16.949,
498
+ "step": 35000
499
+ },
500
+ {
501
+ "epoch": 5.65,
502
+ "learning_rate": 3.316982013548237e-05,
503
+ "loss": 2.8186,
504
+ "step": 36000
505
+ },
506
+ {
507
+ "epoch": 5.65,
508
+ "eval_loss": 3.0562849044799805,
509
+ "eval_runtime": 158.9079,
510
+ "eval_samples_per_second": 71.312,
511
+ "eval_steps_per_second": 17.828,
512
+ "step": 36000
513
+ },
514
+ {
515
+ "epoch": 5.8,
516
+ "learning_rate": 3.2613652795853215e-05,
517
+ "loss": 2.8306,
518
+ "step": 37000
519
+ },
520
+ {
521
+ "epoch": 5.8,
522
+ "eval_loss": 3.0535264015197754,
523
+ "eval_runtime": 150.4043,
524
+ "eval_samples_per_second": 75.344,
525
+ "eval_steps_per_second": 18.836,
526
+ "step": 37000
527
+ },
528
+ {
529
+ "epoch": 5.96,
530
+ "learning_rate": 3.205748545622407e-05,
531
+ "loss": 2.8327,
532
+ "step": 38000
533
+ },
534
+ {
535
+ "epoch": 5.96,
536
+ "eval_loss": 3.0540544986724854,
537
+ "eval_runtime": 159.9431,
538
+ "eval_samples_per_second": 70.85,
539
+ "eval_steps_per_second": 17.713,
540
+ "step": 38000
541
+ },
542
+ {
543
+ "epoch": 6.12,
544
+ "learning_rate": 3.1501318116594924e-05,
545
+ "loss": 2.7548,
546
+ "step": 39000
547
+ },
548
+ {
549
+ "epoch": 6.12,
550
+ "eval_loss": 3.068983554840088,
551
+ "eval_runtime": 157.2439,
552
+ "eval_samples_per_second": 72.066,
553
+ "eval_steps_per_second": 18.017,
554
+ "step": 39000
555
+ },
556
+ {
557
+ "epoch": 6.27,
558
+ "learning_rate": 3.094515077696578e-05,
559
+ "loss": 2.7369,
560
+ "step": 40000
561
+ },
562
+ {
563
+ "epoch": 6.27,
564
+ "eval_loss": 3.067845344543457,
565
+ "eval_runtime": 151.1866,
566
+ "eval_samples_per_second": 74.954,
567
+ "eval_steps_per_second": 18.738,
568
+ "step": 40000
569
+ },
570
+ {
571
+ "epoch": 6.43,
572
+ "learning_rate": 3.0388983437336626e-05,
573
+ "loss": 2.7471,
574
+ "step": 41000
575
+ },
576
+ {
577
+ "epoch": 6.43,
578
+ "eval_loss": 3.063314914703369,
579
+ "eval_runtime": 150.8738,
580
+ "eval_samples_per_second": 75.109,
581
+ "eval_steps_per_second": 18.777,
582
+ "step": 41000
583
+ },
584
+ {
585
+ "epoch": 6.59,
586
+ "learning_rate": 2.9832816097707477e-05,
587
+ "loss": 2.7576,
588
+ "step": 42000
589
+ },
590
+ {
591
+ "epoch": 6.59,
592
+ "eval_loss": 3.0629308223724365,
593
+ "eval_runtime": 170.5929,
594
+ "eval_samples_per_second": 66.427,
595
+ "eval_steps_per_second": 16.607,
596
+ "step": 42000
597
+ },
598
+ {
599
+ "epoch": 6.74,
600
+ "learning_rate": 2.9276648758078335e-05,
601
+ "loss": 2.7566,
602
+ "step": 43000
603
+ },
604
+ {
605
+ "epoch": 6.74,
606
+ "eval_loss": 3.0592966079711914,
607
+ "eval_runtime": 152.4939,
608
+ "eval_samples_per_second": 74.311,
609
+ "eval_steps_per_second": 18.578,
610
+ "step": 43000
611
+ },
612
+ {
613
+ "epoch": 6.9,
614
+ "learning_rate": 2.8720481418449186e-05,
615
+ "loss": 2.7642,
616
+ "step": 44000
617
+ },
618
+ {
619
+ "epoch": 6.9,
620
+ "eval_loss": 3.055969715118408,
621
+ "eval_runtime": 152.0719,
622
+ "eval_samples_per_second": 74.517,
623
+ "eval_steps_per_second": 18.629,
624
+ "step": 44000
625
+ },
626
+ {
627
+ "epoch": 7.06,
628
+ "learning_rate": 2.8164314078820037e-05,
629
+ "loss": 2.7268,
630
+ "step": 45000
631
+ },
632
+ {
633
+ "epoch": 7.06,
634
+ "eval_loss": 3.0751819610595703,
635
+ "eval_runtime": 150.8723,
636
+ "eval_samples_per_second": 75.11,
637
+ "eval_steps_per_second": 18.777,
638
+ "step": 45000
639
+ },
640
+ {
641
+ "epoch": 7.21,
642
+ "learning_rate": 2.7608146739190888e-05,
643
+ "loss": 2.6778,
644
+ "step": 46000
645
+ },
646
+ {
647
+ "epoch": 7.21,
648
+ "eval_loss": 3.0745816230773926,
649
+ "eval_runtime": 150.773,
650
+ "eval_samples_per_second": 75.159,
651
+ "eval_steps_per_second": 18.79,
652
+ "step": 46000
653
+ },
654
+ {
655
+ "epoch": 7.37,
656
+ "learning_rate": 2.705197939956174e-05,
657
+ "loss": 2.6895,
658
+ "step": 47000
659
+ },
660
+ {
661
+ "epoch": 7.37,
662
+ "eval_loss": 3.071889877319336,
663
+ "eval_runtime": 151.4074,
664
+ "eval_samples_per_second": 74.844,
665
+ "eval_steps_per_second": 18.711,
666
+ "step": 47000
667
+ },
668
+ {
669
+ "epoch": 7.53,
670
+ "learning_rate": 2.6495812059932596e-05,
671
+ "loss": 2.6908,
672
+ "step": 48000
673
+ },
674
+ {
675
+ "epoch": 7.53,
676
+ "eval_loss": 3.0682897567749023,
677
+ "eval_runtime": 150.9084,
678
+ "eval_samples_per_second": 75.092,
679
+ "eval_steps_per_second": 18.773,
680
+ "step": 48000
681
+ },
682
+ {
683
+ "epoch": 7.69,
684
+ "learning_rate": 2.5939644720303447e-05,
685
+ "loss": 2.6985,
686
+ "step": 49000
687
+ },
688
+ {
689
+ "epoch": 7.69,
690
+ "eval_loss": 3.06658935546875,
691
+ "eval_runtime": 151.6999,
692
+ "eval_samples_per_second": 74.7,
693
+ "eval_steps_per_second": 18.675,
694
+ "step": 49000
695
+ },
696
+ {
697
+ "epoch": 7.84,
698
+ "learning_rate": 2.5383477380674298e-05,
699
+ "loss": 2.6969,
700
+ "step": 50000
701
+ },
702
+ {
703
+ "epoch": 7.84,
704
+ "eval_loss": 3.0591042041778564,
705
+ "eval_runtime": 161.1961,
706
+ "eval_samples_per_second": 70.299,
707
+ "eval_steps_per_second": 17.575,
708
+ "step": 50000
709
+ },
710
+ {
711
+ "epoch": 8.0,
712
+ "learning_rate": 2.4827310041045153e-05,
713
+ "loss": 2.7043,
714
+ "step": 51000
715
+ },
716
+ {
717
+ "epoch": 8.0,
718
+ "eval_loss": 3.0611181259155273,
719
+ "eval_runtime": 152.3504,
720
+ "eval_samples_per_second": 74.381,
721
+ "eval_steps_per_second": 18.595,
722
+ "step": 51000
723
+ },
724
+ {
725
+ "epoch": 8.16,
726
+ "learning_rate": 2.4271142701416004e-05,
727
+ "loss": 2.6225,
728
+ "step": 52000
729
+ },
730
+ {
731
+ "epoch": 8.16,
732
+ "eval_loss": 3.080932378768921,
733
+ "eval_runtime": 150.5999,
734
+ "eval_samples_per_second": 75.246,
735
+ "eval_steps_per_second": 18.811,
736
+ "step": 52000
737
+ },
738
+ {
739
+ "epoch": 8.31,
740
+ "learning_rate": 2.3714975361786858e-05,
741
+ "loss": 2.636,
742
+ "step": 53000
743
+ },
744
+ {
745
+ "epoch": 8.31,
746
+ "eval_loss": 3.0787675380706787,
747
+ "eval_runtime": 152.1353,
748
+ "eval_samples_per_second": 74.486,
749
+ "eval_steps_per_second": 18.622,
750
+ "step": 53000
751
+ },
752
+ {
753
+ "epoch": 8.47,
754
+ "learning_rate": 2.315880802215771e-05,
755
+ "loss": 2.6379,
756
+ "step": 54000
757
+ },
758
+ {
759
+ "epoch": 8.47,
760
+ "eval_loss": 3.0813567638397217,
761
+ "eval_runtime": 150.7408,
762
+ "eval_samples_per_second": 75.175,
763
+ "eval_steps_per_second": 18.794,
764
+ "step": 54000
765
+ },
766
+ {
767
+ "epoch": 8.63,
768
+ "learning_rate": 2.260264068252856e-05,
769
+ "loss": 2.6423,
770
+ "step": 55000
771
+ },
772
+ {
773
+ "epoch": 8.63,
774
+ "eval_loss": 3.0757715702056885,
775
+ "eval_runtime": 151.7054,
776
+ "eval_samples_per_second": 74.697,
777
+ "eval_steps_per_second": 18.674,
778
+ "step": 55000
779
+ },
780
+ {
781
+ "epoch": 8.78,
782
+ "learning_rate": 2.2046473342899414e-05,
783
+ "loss": 2.6417,
784
+ "step": 56000
785
+ },
786
+ {
787
+ "epoch": 8.78,
788
+ "eval_loss": 3.074010133743286,
789
+ "eval_runtime": 151.6069,
790
+ "eval_samples_per_second": 74.746,
791
+ "eval_steps_per_second": 18.686,
792
+ "step": 56000
793
+ },
794
+ {
795
+ "epoch": 8.94,
796
+ "learning_rate": 2.1490306003270265e-05,
797
+ "loss": 2.6507,
798
+ "step": 57000
799
+ },
800
+ {
801
+ "epoch": 8.94,
802
+ "eval_loss": 3.07344388961792,
803
+ "eval_runtime": 158.2388,
804
+ "eval_samples_per_second": 71.613,
805
+ "eval_steps_per_second": 17.903,
806
+ "step": 57000
807
+ },
808
+ {
809
+ "epoch": 9.1,
810
+ "learning_rate": 2.0934138663641116e-05,
811
+ "loss": 2.6058,
812
+ "step": 58000
813
+ },
814
+ {
815
+ "epoch": 9.1,
816
+ "eval_loss": 3.086090087890625,
817
+ "eval_runtime": 151.6749,
818
+ "eval_samples_per_second": 74.712,
819
+ "eval_steps_per_second": 18.678,
820
+ "step": 58000
821
+ },
822
+ {
823
+ "epoch": 9.25,
824
+ "learning_rate": 2.0377971324011967e-05,
825
+ "loss": 2.5833,
826
+ "step": 59000
827
+ },
828
+ {
829
+ "epoch": 9.25,
830
+ "eval_loss": 3.086378812789917,
831
+ "eval_runtime": 150.5584,
832
+ "eval_samples_per_second": 75.266,
833
+ "eval_steps_per_second": 18.817,
834
+ "step": 59000
835
+ },
836
+ {
837
+ "epoch": 9.41,
838
+ "learning_rate": 1.982180398438282e-05,
839
+ "loss": 2.5864,
840
+ "step": 60000
841
+ },
842
+ {
843
+ "epoch": 9.41,
844
+ "eval_loss": 3.081770896911621,
845
+ "eval_runtime": 152.7908,
846
+ "eval_samples_per_second": 74.167,
847
+ "eval_steps_per_second": 18.542,
848
+ "step": 60000
849
+ },
850
+ {
851
+ "epoch": 9.57,
852
+ "learning_rate": 1.9265636644753672e-05,
853
+ "loss": 2.5952,
854
+ "step": 61000
855
+ },
856
+ {
857
+ "epoch": 9.57,
858
+ "eval_loss": 3.0846848487854004,
859
+ "eval_runtime": 158.7675,
860
+ "eval_samples_per_second": 71.375,
861
+ "eval_steps_per_second": 17.844,
862
+ "step": 61000
863
+ },
864
+ {
865
+ "epoch": 9.72,
866
+ "learning_rate": 1.8709469305124527e-05,
867
+ "loss": 2.6003,
868
+ "step": 62000
869
+ },
870
+ {
871
+ "epoch": 9.72,
872
+ "eval_loss": 3.0796427726745605,
873
+ "eval_runtime": 151.7749,
874
+ "eval_samples_per_second": 74.663,
875
+ "eval_steps_per_second": 18.666,
876
+ "step": 62000
877
+ },
878
+ {
879
+ "epoch": 9.88,
880
+ "learning_rate": 1.8153301965495378e-05,
881
+ "loss": 2.6024,
882
+ "step": 63000
883
+ },
884
+ {
885
+ "epoch": 9.88,
886
+ "eval_loss": 3.076544761657715,
887
+ "eval_runtime": 150.8625,
888
+ "eval_samples_per_second": 75.115,
889
+ "eval_steps_per_second": 18.779,
890
+ "step": 63000
891
+ },
892
+ {
893
+ "epoch": 10.04,
894
+ "learning_rate": 1.7597134625866232e-05,
895
+ "loss": 2.5883,
896
+ "step": 64000
897
+ },
898
+ {
899
+ "epoch": 10.04,
900
+ "eval_loss": 3.0901451110839844,
901
+ "eval_runtime": 150.7672,
902
+ "eval_samples_per_second": 75.162,
903
+ "eval_steps_per_second": 18.791,
904
+ "step": 64000
905
+ },
906
+ {
907
+ "epoch": 10.19,
908
+ "learning_rate": 1.7040967286237083e-05,
909
+ "loss": 2.5393,
910
+ "step": 65000
911
+ },
912
+ {
913
+ "epoch": 10.19,
914
+ "eval_loss": 3.0962793827056885,
915
+ "eval_runtime": 158.5186,
916
+ "eval_samples_per_second": 71.487,
917
+ "eval_steps_per_second": 17.872,
918
+ "step": 65000
919
+ },
920
+ {
921
+ "epoch": 10.35,
922
+ "learning_rate": 1.6484799946607937e-05,
923
+ "loss": 2.5485,
924
+ "step": 66000
925
+ },
926
+ {
927
+ "epoch": 10.35,
928
+ "eval_loss": 3.0939271450042725,
929
+ "eval_runtime": 150.6091,
930
+ "eval_samples_per_second": 75.241,
931
+ "eval_steps_per_second": 18.81,
932
+ "step": 66000
933
+ },
934
+ {
935
+ "epoch": 10.51,
936
+ "learning_rate": 1.5928632606978788e-05,
937
+ "loss": 2.5496,
938
+ "step": 67000
939
+ },
940
+ {
941
+ "epoch": 10.51,
942
+ "eval_loss": 3.092724084854126,
943
+ "eval_runtime": 151.4272,
944
+ "eval_samples_per_second": 74.835,
945
+ "eval_steps_per_second": 18.709,
946
+ "step": 67000
947
+ },
948
+ {
949
+ "epoch": 10.66,
950
+ "learning_rate": 1.5372465267349643e-05,
951
+ "loss": 2.5577,
952
+ "step": 68000
953
+ },
954
+ {
955
+ "epoch": 10.66,
956
+ "eval_loss": 3.0966575145721436,
957
+ "eval_runtime": 152.0136,
958
+ "eval_samples_per_second": 74.546,
959
+ "eval_steps_per_second": 18.636,
960
+ "step": 68000
961
+ },
962
+ {
963
+ "epoch": 10.82,
964
+ "learning_rate": 1.4816297927720494e-05,
965
+ "loss": 2.5598,
966
+ "step": 69000
967
+ },
968
+ {
969
+ "epoch": 10.82,
970
+ "eval_loss": 3.091947078704834,
971
+ "eval_runtime": 150.5612,
972
+ "eval_samples_per_second": 75.265,
973
+ "eval_steps_per_second": 18.816,
974
+ "step": 69000
975
+ },
976
+ {
977
+ "epoch": 10.98,
978
+ "learning_rate": 1.4260130588091345e-05,
979
+ "loss": 2.5623,
980
+ "step": 70000
981
+ },
982
+ {
983
+ "epoch": 10.98,
984
+ "eval_loss": 3.090240955352783,
985
+ "eval_runtime": 151.6176,
986
+ "eval_samples_per_second": 74.741,
987
+ "eval_steps_per_second": 18.685,
988
+ "step": 70000
989
+ },
990
+ {
991
+ "epoch": 11.14,
992
+ "learning_rate": 1.3703963248462199e-05,
993
+ "loss": 2.5138,
994
+ "step": 71000
995
+ },
996
+ {
997
+ "epoch": 11.14,
998
+ "eval_loss": 3.103158473968506,
999
+ "eval_runtime": 150.096,
1000
+ "eval_samples_per_second": 75.498,
1001
+ "eval_steps_per_second": 18.875,
1002
+ "step": 71000
1003
+ },
1004
+ {
1005
+ "epoch": 11.29,
1006
+ "learning_rate": 1.314779590883305e-05,
1007
+ "loss": 2.5142,
1008
+ "step": 72000
1009
+ },
1010
+ {
1011
+ "epoch": 11.29,
1012
+ "eval_loss": 3.1014111042022705,
1013
+ "eval_runtime": 151.6463,
1014
+ "eval_samples_per_second": 74.727,
1015
+ "eval_steps_per_second": 18.682,
1016
+ "step": 72000
1017
+ },
1018
+ {
1019
+ "epoch": 11.45,
1020
+ "learning_rate": 1.2591628569203902e-05,
1021
+ "loss": 2.514,
1022
+ "step": 73000
1023
+ },
1024
+ {
1025
+ "epoch": 11.45,
1026
+ "eval_loss": 3.1006739139556885,
1027
+ "eval_runtime": 158.2805,
1028
+ "eval_samples_per_second": 71.594,
1029
+ "eval_steps_per_second": 17.899,
1030
+ "step": 73000
1031
+ },
1032
+ {
1033
+ "epoch": 11.61,
1034
+ "learning_rate": 1.2035461229574755e-05,
1035
+ "loss": 2.5206,
1036
+ "step": 74000
1037
+ },
1038
+ {
1039
+ "epoch": 11.61,
1040
+ "eval_loss": 3.1010029315948486,
1041
+ "eval_runtime": 150.3118,
1042
+ "eval_samples_per_second": 75.39,
1043
+ "eval_steps_per_second": 18.847,
1044
+ "step": 74000
1045
+ },
1046
+ {
1047
+ "epoch": 11.76,
1048
+ "learning_rate": 1.1479293889945606e-05,
1049
+ "loss": 2.52,
1050
+ "step": 75000
1051
+ },
1052
+ {
1053
+ "epoch": 11.76,
1054
+ "eval_loss": 3.098405361175537,
1055
+ "eval_runtime": 156.5194,
1056
+ "eval_samples_per_second": 72.4,
1057
+ "eval_steps_per_second": 18.1,
1058
+ "step": 75000
1059
+ },
1060
+ {
1061
+ "epoch": 11.92,
1062
+ "learning_rate": 1.0923126550316459e-05,
1063
+ "loss": 2.5204,
1064
+ "step": 76000
1065
+ },
1066
+ {
1067
+ "epoch": 11.92,
1068
+ "eval_loss": 3.0984325408935547,
1069
+ "eval_runtime": 151.2164,
1070
+ "eval_samples_per_second": 74.939,
1071
+ "eval_steps_per_second": 18.735,
1072
+ "step": 76000
1073
+ }
1074
+ ],
1075
+ "max_steps": 95640,
1076
+ "num_train_epochs": 15,
1077
+ "total_flos": 1.1223588676956365e+18,
1078
+ "trial_name": null,
1079
+ "trial_params": null
1080
+ }
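trainer_state.json above is the Hugging Face Trainer log for this run: 76,000 of a planned 95,640 optimizer steps (about 11.9 of 15 epochs), with training loss and evaluation metrics recorded every 1,000 steps. A small sketch of pulling the evaluation curve back out of the file, for example to locate the step with the lowest eval_loss:

# Sketch: read the Trainer log above and find the best evaluation step.
import json

with open("trainer_state.json") as f:  # placeholder path
    state = json.load(f)

evals = [e for e in state["log_history"] if "eval_loss" in e]
best = min(evals, key=lambda e: e["eval_loss"])
print(f"best eval_loss {best['eval_loss']:.4f} at step {best['step']}")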
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6add11d31386a3b4a153ab52e630b6d43be42e2d14fedb249c32f294570a4203
+ size 3183
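training_args.bin is the pickled TrainingArguments object the Trainer saves alongside a checkpoint; its contents are not rendered in this diff, but it can be inspected locally. A hedged sketch (the path is a placeholder, and the printed attribute values are whatever was actually saved, which this commit does not show):

# Sketch: inspect the saved TrainingArguments (requires transformers to be importable).
import torch

args = torch.load("training_args.bin")  # on recent PyTorch, weights_only=False may be needed
print(args.learning_rate, args.num_train_epochs, args.per_device_train_batch_size)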
vocab.json ADDED