SystemAdmin123 committed
Commit d87410b · verified · 1 Parent(s): ebf2025

Training in progress, step 400, checkpoint

last-checkpoint/config.json CHANGED
@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "fxmarty/tiny-dummy-qwen2",
+  "_name_or_path": "peft-internal-testing/tiny-dummy-qwen2",
   "architectures": [
     "Qwen2ForCausalLM"
   ],
last-checkpoint/model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:89fe48d90293c6de8a412c54d1431a33bdd6255c0ce7cf015902d4131b92c913
+oid sha256:d74a2d30e9b5eaccdc8860d90690f3f6102f6de30d1c8b6b8ec10411ff53d836
 size 2433024
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:bf7ecc2f0c8d284946a757ca8f3ded322c622c5f84f3780795fb07bff724c053
+oid sha256:2b7c01f6981a3ced8f46ae6b6f209864546e601aaf3a635f661d708f4b18efaf
 size 2498406
last-checkpoint/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9feae33b2fec0a6229240e7adaee6ecc8f5cfdf1a8bd0e827b1d8a241424e3c0
+size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:8f0c7ff54b85ffef1dcbffa5d8d256d7b7e02cf6f2a611b338e53d605c1ee098
+oid sha256:a673aaf85c0fe6b6c29cb8f3e7dbd829eef637110e4ad9a775f3fcf001c92591
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,203 +1,323 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.1893491124260355,
-  "eval_steps": 20,
-  "global_step": 160,
+  "epoch": 0.11837821840781296,
+  "eval_steps": 200,
+  "global_step": 400,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
   "log_history": [
     {
-      "epoch": 0.001183431952662722,
-      "eval_loss": 11.929322242736816,
-      "eval_runtime": 3.5424,
-      "eval_samples_per_second": 424.005,
-      "eval_steps_per_second": 26.536,
+      "epoch": 0.0002959455460195324,
+      "eval_loss": 11.929207801818848,
+      "eval_runtime": 36.681,
+      "eval_samples_per_second": 40.948,
+      "eval_steps_per_second": 10.251,
       "step": 1
     },
     {
-      "epoch": 0.011834319526627219,
-      "grad_norm": 0.19140625,
+      "epoch": 0.002959455460195324,
+      "grad_norm": 0.212890625,
       "learning_rate": 1.6000000000000003e-05,
-      "loss": 11.9299,
+      "loss": 11.9295,
       "step": 10
     },
     {
-      "epoch": 0.023668639053254437,
-      "grad_norm": 0.12158203125,
+      "epoch": 0.005918910920390648,
+      "grad_norm": 0.263671875,
       "learning_rate": 3.2000000000000005e-05,
-      "loss": 11.9289,
+      "loss": 11.9301,
       "step": 20
     },
     {
-      "epoch": 0.023668639053254437,
-      "eval_loss": 11.92916488647461,
-      "eval_runtime": 3.5376,
-      "eval_samples_per_second": 424.587,
-      "eval_steps_per_second": 26.572,
-      "step": 20
-    },
-    {
-      "epoch": 0.03550295857988166,
-      "grad_norm": 0.251953125,
+      "epoch": 0.008878366380585973,
+      "grad_norm": 0.375,
       "learning_rate": 4.8e-05,
-      "loss": 11.9288,
+      "loss": 11.9292,
       "step": 30
     },
     {
-      "epoch": 0.047337278106508875,
-      "grad_norm": 0.1435546875,
+      "epoch": 0.011837821840781295,
+      "grad_norm": 0.47265625,
       "learning_rate": 6.400000000000001e-05,
-      "loss": 11.9293,
+      "loss": 11.926,
       "step": 40
     },
     {
-      "epoch": 0.047337278106508875,
-      "eval_loss": 11.928914070129395,
-      "eval_runtime": 3.5225,
-      "eval_samples_per_second": 426.404,
-      "eval_steps_per_second": 26.686,
-      "step": 40
-    },
-    {
-      "epoch": 0.05917159763313609,
-      "grad_norm": 0.373046875,
+      "epoch": 0.01479727730097662,
+      "grad_norm": 0.84375,
       "learning_rate": 8e-05,
-      "loss": 11.9265,
+      "loss": 11.9345,
       "step": 50
     },
     {
-      "epoch": 0.07100591715976332,
-      "grad_norm": 0.158203125,
+      "epoch": 0.017756732761171946,
+      "grad_norm": 0.244140625,
       "learning_rate": 9.6e-05,
-      "loss": 11.9267,
-      "step": 60
-    },
-    {
-      "epoch": 0.07100591715976332,
-      "eval_loss": 11.92799186706543,
-      "eval_runtime": 3.5615,
-      "eval_samples_per_second": 421.727,
-      "eval_steps_per_second": 26.393,
+      "loss": 11.9293,
       "step": 60
     },
     {
-      "epoch": 0.08284023668639054,
-      "grad_norm": 0.146484375,
+      "epoch": 0.020716188221367268,
+      "grad_norm": 0.255859375,
       "learning_rate": 0.00011200000000000001,
-      "loss": 11.9275,
+      "loss": 11.9289,
       "step": 70
     },
     {
-      "epoch": 0.09467455621301775,
-      "grad_norm": 0.2333984375,
+      "epoch": 0.02367564368156259,
+      "grad_norm": 0.34375,
       "learning_rate": 0.00012800000000000002,
-      "loss": 11.9256,
+      "loss": 11.9291,
       "step": 80
     },
     {
-      "epoch": 0.09467455621301775,
-      "eval_loss": 11.925419807434082,
-      "eval_runtime": 3.5961,
-      "eval_samples_per_second": 417.672,
-      "eval_steps_per_second": 26.139,
-      "step": 80
-    },
-    {
-      "epoch": 0.10650887573964497,
-      "grad_norm": 0.236328125,
+      "epoch": 0.026635099141757917,
+      "grad_norm": 0.453125,
       "learning_rate": 0.000144,
-      "loss": 11.9219,
+      "loss": 11.9271,
       "step": 90
     },
     {
-      "epoch": 0.11834319526627218,
-      "grad_norm": 0.51953125,
+      "epoch": 0.02959455460195324,
+      "grad_norm": 0.80859375,
       "learning_rate": 0.00016,
-      "loss": 11.9151,
-      "step": 100
-    },
-    {
-      "epoch": 0.11834319526627218,
-      "eval_loss": 11.913871765136719,
-      "eval_runtime": 3.5719,
-      "eval_samples_per_second": 420.504,
-      "eval_steps_per_second": 26.316,
+      "loss": 11.9272,
       "step": 100
     },
     {
-      "epoch": 0.1301775147928994,
-      "grad_norm": 0.494140625,
+      "epoch": 0.032554010062148565,
+      "grad_norm": 0.23046875,
       "learning_rate": 0.00017600000000000002,
-      "loss": 11.9056,
+      "loss": 11.9286,
       "step": 110
     },
     {
-      "epoch": 0.14201183431952663,
-      "grad_norm": 0.6171875,
+      "epoch": 0.03551346552234389,
+      "grad_norm": 0.322265625,
       "learning_rate": 0.000192,
-      "loss": 11.8786,
+      "loss": 11.9239,
       "step": 120
     },
     {
-      "epoch": 0.14201183431952663,
-      "eval_loss": 11.85535717010498,
-      "eval_runtime": 3.5396,
-      "eval_samples_per_second": 424.345,
-      "eval_steps_per_second": 26.557,
-      "step": 120
-    },
-    {
-      "epoch": 0.15384615384615385,
-      "grad_norm": 0.59765625,
+      "epoch": 0.03847292098253921,
+      "grad_norm": 0.4296875,
       "learning_rate": 0.0001999978128380225,
-      "loss": 11.8261,
+      "loss": 11.929,
       "step": 130
     },
     {
-      "epoch": 0.16568047337278108,
-      "grad_norm": 0.4921875,
+      "epoch": 0.041432376442734536,
+      "grad_norm": 0.5,
       "learning_rate": 0.0001999803161162393,
-      "loss": 11.7729,
-      "step": 140
-    },
-    {
-      "epoch": 0.16568047337278108,
-      "eval_loss": 11.73879337310791,
-      "eval_runtime": 3.4959,
-      "eval_samples_per_second": 429.652,
-      "eval_steps_per_second": 26.889,
+      "loss": 11.9268,
       "step": 140
     },
     {
-      "epoch": 0.17751479289940827,
-      "grad_norm": 0.6171875,
+      "epoch": 0.04439183190292986,
+      "grad_norm": 1.2578125,
       "learning_rate": 0.00019994532573409262,
-      "loss": 11.714,
+      "loss": 11.9198,
       "step": 150
     },
     {
-      "epoch": 0.1893491124260355,
-      "grad_norm": 0.390625,
+      "epoch": 0.04735128736312518,
+      "grad_norm": 0.400390625,
       "learning_rate": 0.00019989284781388617,
-      "loss": 11.6588,
+      "loss": 11.9145,
       "step": 160
     },
     {
-      "epoch": 0.1893491124260355,
-      "eval_loss": 11.629658699035645,
-      "eval_runtime": 3.5771,
-      "eval_samples_per_second": 419.895,
-      "eval_steps_per_second": 26.278,
-      "step": 160
+      "epoch": 0.05031074282332051,
+      "grad_norm": 0.5234375,
+      "learning_rate": 0.00019982289153773646,
+      "loss": 11.9004,
+      "step": 170
+    },
+    {
+      "epoch": 0.053270198283515834,
+      "grad_norm": 0.796875,
+      "learning_rate": 0.00019973546914596623,
+      "loss": 11.8792,
+      "step": 180
+    },
+    {
+      "epoch": 0.05622965374371116,
+      "grad_norm": 0.7109375,
+      "learning_rate": 0.00019963059593496268,
+      "loss": 11.8308,
+      "step": 190
+    },
+    {
+      "epoch": 0.05918910920390648,
+      "grad_norm": 0.7734375,
+      "learning_rate": 0.00019950829025450114,
+      "loss": 11.78,
+      "step": 200
+    },
+    {
+      "epoch": 0.05918910920390648,
+      "eval_loss": 11.750052452087402,
+      "eval_runtime": 37.1832,
+      "eval_samples_per_second": 40.395,
+      "eval_steps_per_second": 10.112,
+      "step": 200
+    },
+    {
+      "epoch": 0.062148564664101805,
+      "grad_norm": 0.451171875,
+      "learning_rate": 0.0001993685735045343,
+      "loss": 11.7099,
+      "step": 210
+    },
+    {
+      "epoch": 0.06510802012429713,
+      "grad_norm": 0.435546875,
+      "learning_rate": 0.0001992114701314478,
+      "loss": 11.6724,
+      "step": 220
+    },
+    {
+      "epoch": 0.06806747558449246,
+      "grad_norm": 0.33203125,
+      "learning_rate": 0.000199037007623783,
+      "loss": 11.6468,
+      "step": 230
+    },
+    {
+      "epoch": 0.07102693104468778,
+      "grad_norm": 0.625,
+      "learning_rate": 0.00019884521650742715,
+      "loss": 11.6114,
+      "step": 240
+    },
+    {
+      "epoch": 0.0739863865048831,
+      "grad_norm": 0.9296875,
+      "learning_rate": 0.00019863613034027224,
+      "loss": 11.6611,
+      "step": 250
+    },
+    {
+      "epoch": 0.07694584196507842,
+      "grad_norm": 0.482421875,
+      "learning_rate": 0.0001984097857063434,
+      "loss": 11.4853,
+      "step": 260
+    },
+    {
+      "epoch": 0.07990529742527375,
+      "grad_norm": 0.41796875,
+      "learning_rate": 0.0001981662222093976,
+      "loss": 11.4982,
+      "step": 270
+    },
+    {
+      "epoch": 0.08286475288546907,
+      "grad_norm": 0.59375,
+      "learning_rate": 0.00019790548246599447,
+      "loss": 11.4479,
+      "step": 280
+    },
+    {
+      "epoch": 0.0858242083456644,
+      "grad_norm": 1.0078125,
+      "learning_rate": 0.00019762761209803927,
+      "loss": 11.3621,
+      "step": 290
+    },
+    {
+      "epoch": 0.08878366380585972,
+      "grad_norm": 1.921875,
+      "learning_rate": 0.0001973326597248006,
+      "loss": 11.3921,
+      "step": 300
+    },
+    {
+      "epoch": 0.09174311926605505,
+      "grad_norm": 0.400390625,
+      "learning_rate": 0.00019702067695440332,
+      "loss": 11.2354,
+      "step": 310
+    },
+    {
+      "epoch": 0.09470257472625036,
+      "grad_norm": 0.490234375,
+      "learning_rate": 0.00019669171837479873,
+      "loss": 11.2262,
+      "step": 320
+    },
+    {
+      "epoch": 0.09766203018644569,
+      "grad_norm": 0.5234375,
+      "learning_rate": 0.00019634584154421317,
+      "loss": 11.1823,
+      "step": 330
+    },
+    {
+      "epoch": 0.10062148564664102,
+      "grad_norm": 0.46875,
+      "learning_rate": 0.00019598310698107702,
+      "loss": 11.1421,
+      "step": 340
+    },
+    {
+      "epoch": 0.10358094110683634,
+      "grad_norm": 1.203125,
+      "learning_rate": 0.00019560357815343577,
+      "loss": 11.1708,
+      "step": 350
+    },
+    {
+      "epoch": 0.10654039656703167,
+      "grad_norm": 0.52734375,
+      "learning_rate": 0.00019520732146784491,
+      "loss": 11.1091,
+      "step": 360
+    },
+    {
+      "epoch": 0.109499852027227,
+      "grad_norm": 0.67578125,
+      "learning_rate": 0.0001947944062577507,
+      "loss": 11.0413,
+      "step": 370
+    },
+    {
+      "epoch": 0.11245930748742232,
+      "grad_norm": 0.44921875,
+      "learning_rate": 0.00019436490477135878,
+      "loss": 11.0481,
+      "step": 380
+    },
+    {
+      "epoch": 0.11541876294761765,
+      "grad_norm": 0.640625,
+      "learning_rate": 0.00019391889215899299,
+      "loss": 11.1447,
+      "step": 390
+    },
+    {
+      "epoch": 0.11837821840781296,
+      "grad_norm": 1.203125,
+      "learning_rate": 0.0001934564464599461,
+      "loss": 10.974,
+      "step": 400
+    },
+    {
+      "epoch": 0.11837821840781296,
+      "eval_loss": 11.000988006591797,
+      "eval_runtime": 37.2684,
+      "eval_samples_per_second": 40.302,
+      "eval_steps_per_second": 10.089,
+      "step": 400
     }
   ],
   "logging_steps": 10,
   "max_steps": 2500,
   "num_input_tokens_seen": 0,
-  "num_train_epochs": 3,
-  "save_steps": 40,
+  "num_train_epochs": 1,
+  "save_steps": 400,
   "stateful_callbacks": {
     "TrainerControl": {
       "args": {
@@ -210,8 +330,8 @@
       "attributes": {}
     }
   },
-  "total_flos": 62711857152.0,
-  "train_batch_size": 2,
+  "total_flos": 39262224384.0,
+  "train_batch_size": 4,
   "trial_name": null,
   "trial_params": null
 }
last-checkpoint/training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:20af897fff432dad9da1232d2f5e6ab4040448b601443dcf983c39a3e47a3b26
-size 6648
+oid sha256:6cafd6770e4d3416c27e43a8308e52009a8c0c1fff65ee59e17f0872cd1b39e9
+size 6840