nRuaif committed
Commit d656403 · 1 Parent(s): 7d5da20

Upload folder using huggingface_hub

README.md CHANGED
@@ -4,6 +4,18 @@ library_name: peft
 ## Training procedure
 
 
+The following `bitsandbytes` quantization config was used during training:
+- quant_method: bitsandbytes
+- load_in_8bit: False
+- load_in_4bit: True
+- llm_int8_threshold: 6.0
+- llm_int8_skip_modules: None
+- llm_int8_enable_fp32_cpu_offload: False
+- llm_int8_has_fp16_weight: False
+- bnb_4bit_quant_type: nf4
+- bnb_4bit_use_double_quant: True
+- bnb_4bit_compute_dtype: bfloat16
+
 The following `bitsandbytes` quantization config was used during training:
 - quant_method: bitsandbytes
 - load_in_8bit: False
@@ -17,5 +29,6 @@ The following `bitsandbytes` quantization config was used during training:
 - bnb_4bit_compute_dtype: bfloat16
 ### Framework versions
 
+- PEFT 0.6.0.dev0
 
 - PEFT 0.6.0.dev0
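
The quantization list added above maps directly onto `transformers.BitsAndBytesConfig`. A minimal sketch, assuming a recent `transformers`/`bitsandbytes` install (the variable name `bnb_config` is illustrative; values are copied from the diff):

```python
import torch
from transformers import BitsAndBytesConfig

# The README's quantization settings, re-expressed as a config object.
bnb_config = BitsAndBytesConfig(
    load_in_8bit=False,
    load_in_4bit=True,
    llm_int8_threshold=6.0,
    llm_int8_skip_modules=None,
    llm_int8_enable_fp32_cpu_offload=False,
    llm_int8_has_fp16_weight=False,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_use_double_quant=True,
    bnb_4bit_compute_dtype=torch.bfloat16,
)
```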
adapter_config.json CHANGED
@@ -3,7 +3,7 @@
   "base_model_name_or_path": "NousResearch/Llama-2-13b-hf",
   "bias": "none",
   "fan_in_fan_out": null,
-  "inference_mode": false,
+  "inference_mode": true,
   "init_lora_weights": true,
   "layers_pattern": null,
   "layers_to_transform": null,
adapter_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:7a1fe333bc20ae357293765749812056106e52a45864fbc6469f3431ba144709
-size 62788109
+oid sha256:60540e78b597d2aa27525b935a7ea66d4d6cfd9a7f9cbbfb972ede8efbdb70c2
+size 31494413
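
The `.bin`/`.pt`/`.pth` entries in this commit are Git LFS pointer files rather than the weights themselves: `oid` is the SHA-256 of the actual file contents and `size` its byte count (here the adapter went from 62,788,109 to 31,494,413 bytes). A small sketch for checking a downloaded file against its pointer (the file path is illustrative):

```python
import hashlib

def lfs_sha256(path: str) -> str:
    """SHA-256 of a file's contents, as recorded in an LFS pointer's oid."""
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
    return h.hexdigest()

# Compare against the new pointer above.
assert lfs_sha256("adapter_model.bin") == (
    "60540e78b597d2aa27525b935a7ea66d4d6cfd9a7f9cbbfb972ede8efbdb70c2"
)
```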
checkpoint-110/README.md ADDED
@@ -0,0 +1,21 @@
+---
+library_name: peft
+---
+## Training procedure
+
+
+The following `bitsandbytes` quantization config was used during training:
+- quant_method: bitsandbytes
+- load_in_8bit: False
+- load_in_4bit: True
+- llm_int8_threshold: 6.0
+- llm_int8_skip_modules: None
+- llm_int8_enable_fp32_cpu_offload: False
+- llm_int8_has_fp16_weight: False
+- bnb_4bit_quant_type: nf4
+- bnb_4bit_use_double_quant: True
+- bnb_4bit_compute_dtype: bfloat16
+### Framework versions
+
+
+- PEFT 0.6.0.dev0
checkpoint-110/adapter_config.json ADDED
@@ -0,0 +1,26 @@
+{
+  "auto_mapping": null,
+  "base_model_name_or_path": "NousResearch/Llama-2-13b-hf",
+  "bias": "none",
+  "fan_in_fan_out": null,
+  "inference_mode": true,
+  "init_lora_weights": true,
+  "layers_pattern": null,
+  "layers_to_transform": null,
+  "lora_alpha": 8,
+  "lora_dropout": 0.05,
+  "modules_to_save": null,
+  "peft_type": "LORA",
+  "r": 4,
+  "revision": null,
+  "target_modules": [
+    "up_proj",
+    "v_proj",
+    "gate_proj",
+    "k_proj",
+    "q_proj",
+    "o_proj",
+    "down_proj"
+  ],
+  "task_type": "CAUSAL_LM"
+}
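
The adapter config above corresponds one-to-one with `peft.LoraConfig`. A minimal sketch of the equivalent object (field values copied from the JSON; `lora_config` is an illustrative name):

```python
from peft import LoraConfig

# LoRA on all attention and MLP projections of the Llama-2 blocks,
# rank 4, alpha 8, 5% dropout, no bias terms trained.
lora_config = LoraConfig(
    r=4,
    lora_alpha=8,
    lora_dropout=0.05,
    bias="none",
    task_type="CAUSAL_LM",
    target_modules=[
        "up_proj", "v_proj", "gate_proj", "k_proj",
        "q_proj", "o_proj", "down_proj",
    ],
)
```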
checkpoint-110/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:92c548ae89c62713bef9b8c64045448ce6e602636587c87701d8f48fc5c10083
+size 31494413
checkpoint-110/adapter_model/README.md ADDED
@@ -0,0 +1,21 @@
+---
+library_name: peft
+---
+## Training procedure
+
+
+The following `bitsandbytes` quantization config was used during training:
+- quant_method: bitsandbytes
+- load_in_8bit: False
+- load_in_4bit: True
+- llm_int8_threshold: 6.0
+- llm_int8_skip_modules: None
+- llm_int8_enable_fp32_cpu_offload: False
+- llm_int8_has_fp16_weight: False
+- bnb_4bit_quant_type: nf4
+- bnb_4bit_use_double_quant: True
+- bnb_4bit_compute_dtype: bfloat16
+### Framework versions
+
+
+- PEFT 0.6.0.dev0
checkpoint-110/adapter_model/adapter_config.json ADDED
@@ -0,0 +1,26 @@
+{
+  "auto_mapping": null,
+  "base_model_name_or_path": "NousResearch/Llama-2-13b-hf",
+  "bias": "none",
+  "fan_in_fan_out": null,
+  "inference_mode": true,
+  "init_lora_weights": true,
+  "layers_pattern": null,
+  "layers_to_transform": null,
+  "lora_alpha": 8,
+  "lora_dropout": 0.05,
+  "modules_to_save": null,
+  "peft_type": "LORA",
+  "r": 4,
+  "revision": null,
+  "target_modules": [
+    "up_proj",
+    "v_proj",
+    "gate_proj",
+    "k_proj",
+    "q_proj",
+    "o_proj",
+    "down_proj"
+  ],
+  "task_type": "CAUSAL_LM"
+}
checkpoint-110/adapter_model/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:92c548ae89c62713bef9b8c64045448ce6e602636587c87701d8f48fc5c10083
+size 31494413
checkpoint-110/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:044296fff79536c3aa33d4c3b19a8e4566aecb27b38272aa84d0e2f4c8f1f6ac
+size 125504317
checkpoint-110/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cdabf7d96511142a7a0ca0702f874dde2521c420a52b309f5bc7662800afd34b
+size 14575
checkpoint-110/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8e26922f16313e4404571e585ab29b36783c8d1e7504b5884fb2a1712fb16557
+size 627
checkpoint-110/trainer_state.json ADDED
@@ -0,0 +1,679 @@
+{
+  "best_metric": null,
+  "best_model_checkpoint": null,
+  "epoch": 2.525107604017217,
+  "eval_steps": 500,
+  "global_step": 110,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {"epoch": 0.02, "learning_rate": 5e-05, "loss": 1.7667, "step": 1},
+    {"epoch": 0.05, "learning_rate": 5e-05, "loss": 1.7274, "step": 2},
+    {"epoch": 0.07, "learning_rate": 5e-05, "loss": 1.656, "step": 3},
+    {"epoch": 0.09, "learning_rate": 5e-05, "loss": 1.7964, "step": 4},
+    {"epoch": 0.11, "learning_rate": 5e-05, "loss": 1.6153, "step": 5},
+    {"epoch": 0.14, "learning_rate": 5e-05, "loss": 1.7931, "step": 6},
+    {"epoch": 0.16, "learning_rate": 5e-05, "loss": 1.7548, "step": 7},
+    {"epoch": 0.18, "learning_rate": 5e-05, "loss": 1.7054, "step": 8},
+    {"epoch": 0.21, "learning_rate": 5e-05, "loss": 1.7311, "step": 9},
+    {"epoch": 0.23, "learning_rate": 5e-05, "loss": 1.6827, "step": 10},
+    {"epoch": 0.25, "learning_rate": 5e-05, "loss": 1.7158, "step": 11},
+    {"epoch": 0.28, "learning_rate": 5e-05, "loss": 1.6599, "step": 12},
+    {"epoch": 0.3, "learning_rate": 5e-05, "loss": 1.6991, "step": 13},
+    {"epoch": 0.32, "learning_rate": 5e-05, "loss": 1.6628, "step": 14},
+    {"epoch": 0.34, "learning_rate": 5e-05, "loss": 1.6188, "step": 15},
+    {"epoch": 0.37, "learning_rate": 5e-05, "loss": 1.6868, "step": 16},
+    {"epoch": 0.39, "learning_rate": 5e-05, "loss": 1.6854, "step": 17},
+    {"epoch": 0.41, "learning_rate": 5e-05, "loss": 1.5938, "step": 18},
+    {"epoch": 0.44, "learning_rate": 5e-05, "loss": 1.6603, "step": 19},
+    {"epoch": 0.46, "learning_rate": 5e-05, "loss": 1.561, "step": 20},
+    {"epoch": 0.48, "learning_rate": 5e-05, "loss": 1.6162, "step": 21},
+    {"epoch": 0.51, "learning_rate": 5e-05, "loss": 1.6159, "step": 22},
+    {"epoch": 0.53, "learning_rate": 5e-05, "loss": 1.6414, "step": 23},
+    {"epoch": 0.55, "learning_rate": 5e-05, "loss": 1.6915, "step": 24},
+    {"epoch": 0.57, "learning_rate": 5e-05, "loss": 1.6853, "step": 25},
+    {"epoch": 0.6, "learning_rate": 5e-05, "loss": 1.6563, "step": 26},
+    {"epoch": 0.62, "learning_rate": 5e-05, "loss": 1.6622, "step": 27},
+    {"epoch": 0.64, "learning_rate": 5e-05, "loss": 1.638, "step": 28},
+    {"epoch": 0.67, "learning_rate": 5e-05, "loss": 1.66, "step": 29},
+    {"epoch": 0.69, "learning_rate": 5e-05, "loss": 1.6565, "step": 30},
+    {"epoch": 0.71, "learning_rate": 5e-05, "loss": 1.6076, "step": 31},
+    {"epoch": 0.73, "learning_rate": 5e-05, "loss": 1.6971, "step": 32},
+    {"epoch": 0.76, "learning_rate": 5e-05, "loss": 1.6836, "step": 33},
+    {"epoch": 0.78, "learning_rate": 5e-05, "loss": 1.587, "step": 34},
+    {"epoch": 0.8, "learning_rate": 5e-05, "loss": 1.7034, "step": 35},
+    {"epoch": 0.83, "learning_rate": 5e-05, "loss": 1.705, "step": 36},
+    {"epoch": 0.85, "learning_rate": 5e-05, "loss": 1.5491, "step": 37},
+    {"epoch": 0.87, "learning_rate": 5e-05, "loss": 1.611, "step": 38},
+    {"epoch": 0.9, "learning_rate": 5e-05, "loss": 1.6795, "step": 39},
+    {"epoch": 0.92, "learning_rate": 5e-05, "loss": 1.5823, "step": 40},
+    {"epoch": 0.94, "learning_rate": 5e-05, "loss": 1.6066, "step": 41},
+    {"epoch": 0.96, "learning_rate": 5e-05, "loss": 1.6279, "step": 42},
+    {"epoch": 0.99, "learning_rate": 5e-05, "loss": 1.5756, "step": 43},
+    {"epoch": 1.01, "learning_rate": 5e-05, "loss": 1.6426, "step": 44},
+    {"epoch": 1.03, "learning_rate": 5e-05, "loss": 1.6566, "step": 45},
+    {"epoch": 1.06, "learning_rate": 5e-05, "loss": 1.6325, "step": 46},
+    {"epoch": 1.08, "learning_rate": 5e-05, "loss": 1.7233, "step": 47},
+    {"epoch": 1.1, "learning_rate": 5e-05, "loss": 1.5899, "step": 48},
+    {"epoch": 1.12, "learning_rate": 5e-05, "loss": 1.5425, "step": 49},
+    {"epoch": 1.15, "learning_rate": 5e-05, "loss": 1.4761, "step": 50},
+    {"epoch": 1.17, "learning_rate": 5e-05, "loss": 1.6153, "step": 51},
+    {"epoch": 1.19, "learning_rate": 5e-05, "loss": 1.6099, "step": 52},
+    {"epoch": 1.22, "learning_rate": 5e-05, "loss": 1.6017, "step": 53},
+    {"epoch": 1.24, "learning_rate": 5e-05, "loss": 1.5573, "step": 54},
+    {"epoch": 1.26, "learning_rate": 5e-05, "loss": 1.5365, "step": 55},
+    {"epoch": 1.29, "learning_rate": 5e-05, "loss": 1.6471, "step": 56},
+    {"epoch": 1.31, "learning_rate": 5e-05, "loss": 1.5963, "step": 57},
+    {"epoch": 1.33, "learning_rate": 5e-05, "loss": 1.6854, "step": 58},
+    {"epoch": 1.35, "learning_rate": 5e-05, "loss": 1.5597, "step": 59},
+    {"epoch": 1.38, "learning_rate": 5e-05, "loss": 1.66, "step": 60},
+    {"epoch": 1.4, "learning_rate": 5e-05, "loss": 1.6862, "step": 61},
+    {"epoch": 1.42, "learning_rate": 5e-05, "loss": 1.7041, "step": 62},
+    {"epoch": 1.45, "learning_rate": 5e-05, "loss": 1.6115, "step": 63},
+    {"epoch": 1.47, "learning_rate": 5e-05, "loss": 1.5222, "step": 64},
+    {"epoch": 1.49, "learning_rate": 5e-05, "loss": 1.5516, "step": 65},
+    {"epoch": 1.52, "learning_rate": 5e-05, "loss": 1.4681, "step": 66},
+    {"epoch": 1.54, "learning_rate": 5e-05, "loss": 1.5683, "step": 67},
+    {"epoch": 1.56, "learning_rate": 5e-05, "loss": 1.7625, "step": 68},
+    {"epoch": 1.58, "learning_rate": 5e-05, "loss": 1.5548, "step": 69},
+    {"epoch": 1.61, "learning_rate": 5e-05, "loss": 1.5915, "step": 70},
+    {"epoch": 1.63, "learning_rate": 5e-05, "loss": 1.6061, "step": 71},
+    {"epoch": 1.65, "learning_rate": 5e-05, "loss": 1.6822, "step": 72},
+    {"epoch": 1.68, "learning_rate": 5e-05, "loss": 1.7447, "step": 73},
+    {"epoch": 1.7, "learning_rate": 5e-05, "loss": 1.6758, "step": 74},
+    {"epoch": 1.72, "learning_rate": 5e-05, "loss": 1.5201, "step": 75},
+    {"epoch": 1.74, "learning_rate": 5e-05, "loss": 1.553, "step": 76},
+    {"epoch": 1.77, "learning_rate": 5e-05, "loss": 1.6177, "step": 77},
+    {"epoch": 1.79, "learning_rate": 5e-05, "loss": 1.6863, "step": 78},
+    {"epoch": 1.81, "learning_rate": 5e-05, "loss": 1.5896, "step": 79},
+    {"epoch": 1.84, "learning_rate": 5e-05, "loss": 1.5084, "step": 80},
+    {"epoch": 1.86, "learning_rate": 5e-05, "loss": 1.6088, "step": 81},
+    {"epoch": 1.88, "learning_rate": 5e-05, "loss": 1.5212, "step": 82},
+    {"epoch": 1.91, "learning_rate": 5e-05, "loss": 1.5589, "step": 83},
+    {"epoch": 1.93, "learning_rate": 5e-05, "loss": 1.5656, "step": 84},
+    {"epoch": 1.95, "learning_rate": 5e-05, "loss": 1.51, "step": 85},
+    {"epoch": 1.97, "learning_rate": 5e-05, "loss": 1.628, "step": 86},
+    {"epoch": 2.0, "learning_rate": 5e-05, "loss": 1.5363, "step": 87},
+    {"epoch": 2.02, "learning_rate": 5e-05, "loss": 1.5808, "step": 88},
+    {"epoch": 2.04, "learning_rate": 5e-05, "loss": 1.5631, "step": 89},
+    {"epoch": 2.07, "learning_rate": 5e-05, "loss": 1.6611, "step": 90},
+    {"epoch": 2.09, "learning_rate": 5e-05, "loss": 1.659, "step": 91},
+    {"epoch": 2.11, "learning_rate": 5e-05, "loss": 1.5854, "step": 92},
+    {"epoch": 2.13, "learning_rate": 5e-05, "loss": 1.5812, "step": 93},
+    {"epoch": 2.16, "learning_rate": 5e-05, "loss": 1.6634, "step": 94},
+    {"epoch": 2.18, "learning_rate": 5e-05, "loss": 1.5291, "step": 95},
+    {"epoch": 2.2, "learning_rate": 5e-05, "loss": 1.61, "step": 96},
+    {"epoch": 2.23, "learning_rate": 5e-05, "loss": 1.5201, "step": 97},
+    {"epoch": 2.25, "learning_rate": 5e-05, "loss": 1.5196, "step": 98},
+    {"epoch": 2.27, "learning_rate": 5e-05, "loss": 1.6469, "step": 99},
+    {"epoch": 2.3, "learning_rate": 5e-05, "loss": 1.6515, "step": 100},
+    {"epoch": 2.32, "learning_rate": 5e-05, "loss": 1.6196, "step": 101},
+    {"epoch": 2.34, "learning_rate": 5e-05, "loss": 1.5504, "step": 102},
+    {"epoch": 2.36, "learning_rate": 5e-05, "loss": 1.6673, "step": 103},
+    {"epoch": 2.39, "learning_rate": 5e-05, "loss": 1.6073, "step": 104},
+    {"epoch": 2.41, "learning_rate": 5e-05, "loss": 1.4973, "step": 105},
+    {"epoch": 2.43, "learning_rate": 5e-05, "loss": 1.546, "step": 106},
+    {"epoch": 2.46, "learning_rate": 5e-05, "loss": 1.6663, "step": 107},
+    {"epoch": 2.48, "learning_rate": 5e-05, "loss": 1.5919, "step": 108},
+    {"epoch": 2.5, "learning_rate": 5e-05, "loss": 1.5186, "step": 109},
+    {"epoch": 2.53, "learning_rate": 5e-05, "loss": 1.6186, "step": 110}
+  ],
+  "logging_steps": 1,
+  "max_steps": 129,
+  "num_train_epochs": 3,
+  "save_steps": 10,
+  "total_flos": 4.876998535544832e+17,
+  "trial_name": null,
+  "trial_params": null
+}
checkpoint-110/training_args.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:056b2d35a8edd0b4c4e1fa596b4bfb9c414a316aae917663d8deeb089d445a4e
+size 4219
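
Together, `optimizer.pt`, `scheduler.pt`, `rng_state.pth`, `trainer_state.json`, and `training_args.bin` are the state `transformers.Trainer` reads when resuming a run. A minimal sketch, assuming `trainer` is a `Trainer` already built with the same model, data, and arguments as the original run:

```python
# Resume training from the state saved at global step 110.
trainer.train(resume_from_checkpoint="checkpoint-110")
```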
checkpoint-120/README.md ADDED
@@ -0,0 +1,21 @@
+---
+library_name: peft
+---
+## Training procedure
+
+
+The following `bitsandbytes` quantization config was used during training:
+- quant_method: bitsandbytes
+- load_in_8bit: False
+- load_in_4bit: True
+- llm_int8_threshold: 6.0
+- llm_int8_skip_modules: None
+- llm_int8_enable_fp32_cpu_offload: False
+- llm_int8_has_fp16_weight: False
+- bnb_4bit_quant_type: nf4
+- bnb_4bit_use_double_quant: True
+- bnb_4bit_compute_dtype: bfloat16
+### Framework versions
+
+
+- PEFT 0.6.0.dev0
checkpoint-120/adapter_config.json ADDED
@@ -0,0 +1,26 @@
+{
+  "auto_mapping": null,
+  "base_model_name_or_path": "NousResearch/Llama-2-13b-hf",
+  "bias": "none",
+  "fan_in_fan_out": null,
+  "inference_mode": true,
+  "init_lora_weights": true,
+  "layers_pattern": null,
+  "layers_to_transform": null,
+  "lora_alpha": 8,
+  "lora_dropout": 0.05,
+  "modules_to_save": null,
+  "peft_type": "LORA",
+  "r": 4,
+  "revision": null,
+  "target_modules": [
+    "up_proj",
+    "v_proj",
+    "gate_proj",
+    "k_proj",
+    "q_proj",
+    "o_proj",
+    "down_proj"
+  ],
+  "task_type": "CAUSAL_LM"
+}
checkpoint-120/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8cd6da81609e9873a3547ef90fe3e0a4e53bd4d7c8da2fd81df68a914602940b
+size 31494413
checkpoint-120/adapter_model/README.md ADDED
@@ -0,0 +1,21 @@
+---
+library_name: peft
+---
+## Training procedure
+
+
+The following `bitsandbytes` quantization config was used during training:
+- quant_method: bitsandbytes
+- load_in_8bit: False
+- load_in_4bit: True
+- llm_int8_threshold: 6.0
+- llm_int8_skip_modules: None
+- llm_int8_enable_fp32_cpu_offload: False
+- llm_int8_has_fp16_weight: False
+- bnb_4bit_quant_type: nf4
+- bnb_4bit_use_double_quant: True
+- bnb_4bit_compute_dtype: bfloat16
+### Framework versions
+
+
+- PEFT 0.6.0.dev0
checkpoint-120/adapter_model/adapter_config.json ADDED
@@ -0,0 +1,26 @@
+{
+  "auto_mapping": null,
+  "base_model_name_or_path": "NousResearch/Llama-2-13b-hf",
+  "bias": "none",
+  "fan_in_fan_out": null,
+  "inference_mode": true,
+  "init_lora_weights": true,
+  "layers_pattern": null,
+  "layers_to_transform": null,
+  "lora_alpha": 8,
+  "lora_dropout": 0.05,
+  "modules_to_save": null,
+  "peft_type": "LORA",
+  "r": 4,
+  "revision": null,
+  "target_modules": [
+    "up_proj",
+    "v_proj",
+    "gate_proj",
+    "k_proj",
+    "q_proj",
+    "o_proj",
+    "down_proj"
+  ],
+  "task_type": "CAUSAL_LM"
+}
checkpoint-120/adapter_model/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8cd6da81609e9873a3547ef90fe3e0a4e53bd4d7c8da2fd81df68a914602940b
+size 31494413
checkpoint-120/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:37f6302bba1883aaca4e12c4406a83a1b98a73d08b9990d7425c8d7cefd3d9fb
+size 125504317
checkpoint-120/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ca8df65761a62a34ef635f3a6ab88b3af73c75d5d410ece5682530177d622d68
+size 14575
checkpoint-120/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8d275f154c30962be2d1bf4bb66176ddf3c4c95a4d36e572b0f2a92a38361510
+size 627
checkpoint-120/trainer_state.json ADDED
@@ -0,0 +1,739 @@
+{
+  "best_metric": null,
+  "best_model_checkpoint": null,
+  "epoch": 2.7546628407460547,
+  "eval_steps": 500,
+  "global_step": 120,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
     ... entries for steps 1-110 are identical to checkpoint-110/trainer_state.json above ...
+    {"epoch": 2.55, "learning_rate": 5e-05, "loss": 1.529, "step": 111},
+    {"epoch": 2.57, "learning_rate": 5e-05, "loss": 1.6125, "step": 112},
+    {"epoch": 2.59, "learning_rate": 5e-05, "loss": 1.4831, "step": 113},
+    {"epoch": 2.62, "learning_rate": 5e-05, "loss": 1.5044, "step": 114},
+    {"epoch": 2.64, "learning_rate": 5e-05, "loss": 1.6618, "step": 115},
+    {"epoch": 2.66, "learning_rate": 5e-05, "loss": 1.502, "step": 116},
+    {"epoch": 2.69, "learning_rate": 5e-05, "loss": 1.5951, "step": 117},
+    {"epoch": 2.71, "learning_rate": 5e-05, "loss": 1.6201, "step": 118},
+    {"epoch": 2.73, "learning_rate": 5e-05, "loss": 1.5698, "step": 119},
+    {"epoch": 2.75, "learning_rate": 5e-05, "loss": 1.6591, "step": 120}
+  ],
+  "logging_steps": 1,
+  "max_steps": 129,
+  "num_train_epochs": 3,
+  "save_steps": 10,
+  "total_flos": 5.318653991583744e+17,
+  "trial_name": null,
+  "trial_params": null
+}
checkpoint-120/training_args.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:056b2d35a8edd0b4c4e1fa596b4bfb9c414a316aae917663d8deeb089d445a4e
+size 4219