Commit 23d4ff6 (verified) · bilal521 · 1 parent: fb20318

Upload model files
config.json ADDED
@@ -0,0 +1,61 @@
+ {
+   "architectures": [
+     "T5ForConditionalGeneration"
+   ],
+   "classifier_dropout": 0.0,
+   "d_ff": 2048,
+   "d_kv": 64,
+   "d_model": 768,
+   "decoder_start_token_id": 0,
+   "dense_act_fn": "gelu_new",
+   "dropout_rate": 0.1,
+   "eos_token_id": 1,
+   "feed_forward_proj": "gated-gelu",
+   "initializer_factor": 1.0,
+   "is_encoder_decoder": true,
+   "is_gated_act": true,
+   "layer_norm_epsilon": 1e-06,
+   "model_type": "t5",
+   "n_positions": 512,
+   "num_decoder_layers": 12,
+   "num_heads": 12,
+   "num_layers": 12,
+   "output_past": true,
+   "pad_token_id": 0,
+   "relative_attention_max_distance": 128,
+   "relative_attention_num_buckets": 32,
+   "task_specific_params": {
+     "summarization": {
+       "early_stopping": true,
+       "length_penalty": 2.0,
+       "max_length": 200,
+       "min_length": 30,
+       "no_repeat_ngram_size": 3,
+       "num_beams": 4,
+       "prefix": "summarize: "
+     },
+     "translation_en_to_de": {
+       "early_stopping": true,
+       "max_length": 300,
+       "num_beams": 4,
+       "prefix": "translate English to German: "
+     },
+     "translation_en_to_fr": {
+       "early_stopping": true,
+       "max_length": 300,
+       "num_beams": 4,
+       "prefix": "translate English to French: "
+     },
+     "translation_en_to_ro": {
+       "early_stopping": true,
+       "max_length": 300,
+       "num_beams": 4,
+       "prefix": "translate English to Romanian: "
+     }
+   },
+   "tie_word_embeddings": false,
+   "torch_dtype": "float32",
+   "transformers_version": "4.52.4",
+   "use_cache": true,
+   "vocab_size": 32128
+ }
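
The config above describes a 12-layer, 768-dimensional T5 encoder-decoder with a gated-GELU feed-forward block. A minimal loading sketch follows; the `./model_dir` path is illustrative and assumes a local clone of these uploaded files (note the tokenizer files are not part of this commit).

```python
# Minimal sketch, assuming transformers ~= 4.52 (the version recorded in
# config.json) and a local copy of the uploaded files in ./model_dir.
from transformers import T5Config, T5ForConditionalGeneration

config = T5Config.from_pretrained("./model_dir")            # reads config.json
model = T5ForConditionalGeneration.from_pretrained("./model_dir")  # reads model.safetensors

print(config.d_model, config.num_layers, config.num_heads, config.vocab_size)
# expected per config.json: 768 12 12 32128
```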
generation_config.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "_from_model_config": true,
+   "decoder_start_token_id": 0,
+   "eos_token_id": 1,
+   "pad_token_id": 0,
+   "transformers_version": "4.52.4"
+ }
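
generation_config.json holds the decoding defaults (decoder start, EOS, and pad token ids) that `model.generate()` falls back to when no explicit generation config is passed. A hedged usage sketch: the tokenizer is not included in this commit, so loading it from "t5-base" is only an assumption about the base vocabulary (its size matches the 32128 in config.json), and the input text is illustrative.

```python
# Hedged decoding sketch: defaults come from generation_config.json; the
# beam-search settings mirror config.json's task_specific_params["summarization"].
from transformers import AutoTokenizer, T5ForConditionalGeneration

model = T5ForConditionalGeneration.from_pretrained("./model_dir")   # local clone, assumed
tokenizer = AutoTokenizer.from_pretrained("t5-base")                # assumption: base T5 vocab

article = "Your input document goes here."  # illustrative text
inputs = tokenizer("summarize: " + article, return_tensors="pt",
                   truncation=True, max_length=512)

ids = model.generate(**inputs, num_beams=4, max_length=200, min_length=30,
                     no_repeat_ngram_size=3, length_penalty=2.0, early_stopping=True)
print(tokenizer.decode(ids[0], skip_special_tokens=True))
```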
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3b4b73713a370c1608044b07bf4c02f3008cc7a2f204e481e6f67f6f3391b0cd
+ size 990345064
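
The weights are stored via Git LFS, so what lives in the repo is only this pointer; the sha256 oid identifies the actual ~990 MB payload. A small sketch for checking a downloaded copy against that digest (the local filename is assumed):

```python
# Verify a downloaded model.safetensors against the sha256 oid in the LFS pointer.
import hashlib

EXPECTED = "3b4b73713a370c1608044b07bf4c02f3008cc7a2f204e481e6f67f6f3391b0cd"

h = hashlib.sha256()
with open("model.safetensors", "rb") as f:              # assumed local path
    for chunk in iter(lambda: f.read(1 << 20), b""):    # stream in 1 MiB chunks
        h.update(chunk)

assert h.hexdigest() == EXPECTED, "file does not match the LFS pointer oid"
```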
optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b40e29cd664386339446bdf1d1ad5175ec9d44ac171361fe40988f77af340ff4
+ size 1980860410
rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:af31f86f3388abfa5b91b3d55dab6c79562c22ff02e4f44b8c52a86acb6ca100
+ size 14244
scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:126e8cb42a7d71fa1c7a67cb29aa5216afa982e0e5306a10e4d09b944176db05
+ size 1064
trainer_state.json ADDED
@@ -0,0 +1,291 @@
+ {
+   "best_global_step": 326,
+   "best_metric": 1.5841516256332397,
+   "best_model_checkpoint": "results_t5base/checkpoint-326",
+   "epoch": 2.0,
+   "eval_steps": 200,
+   "global_step": 326,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.06134969325153374,
+       "grad_norm": 116.47437286376953,
+       "learning_rate": 1.8e-05,
+       "loss": 15.8526,
+       "step": 10
+     },
+     {
+       "epoch": 0.12269938650306748,
+       "grad_norm": 36.57353591918945,
+       "learning_rate": 3.8e-05,
+       "loss": 11.7488,
+       "step": 20
+     },
+     {
+       "epoch": 0.18404907975460122,
+       "grad_norm": 32.9762077331543,
+       "learning_rate": 5.800000000000001e-05,
+       "loss": 10.3287,
+       "step": 30
+     },
+     {
+       "epoch": 0.24539877300613497,
+       "grad_norm": 23.83042335510254,
+       "learning_rate": 7.8e-05,
+       "loss": 6.4168,
+       "step": 40
+     },
+     {
+       "epoch": 0.3067484662576687,
+       "grad_norm": 4.865868091583252,
+       "learning_rate": 9.800000000000001e-05,
+       "loss": 3.5254,
+       "step": 50
+     },
+     {
+       "epoch": 0.36809815950920244,
+       "grad_norm": 12.269708633422852,
+       "learning_rate": 0.000118,
+       "loss": 3.0389,
+       "step": 60
+     },
+     {
+       "epoch": 0.4294478527607362,
+       "grad_norm": 7.369626998901367,
+       "learning_rate": 0.00013800000000000002,
+       "loss": 2.5275,
+       "step": 70
+     },
+     {
+       "epoch": 0.49079754601226994,
+       "grad_norm": 2.0186314582824707,
+       "learning_rate": 0.000158,
+       "loss": 2.1442,
+       "step": 80
+     },
+     {
+       "epoch": 0.5521472392638037,
+       "grad_norm": 1.231919527053833,
+       "learning_rate": 0.000178,
+       "loss": 1.8347,
+       "step": 90
+     },
+     {
+       "epoch": 0.6134969325153374,
+       "grad_norm": 1.3266997337341309,
+       "learning_rate": 0.00019800000000000002,
+       "loss": 1.7494,
+       "step": 100
+     },
+     {
+       "epoch": 0.6748466257668712,
+       "grad_norm": 1.498828649520874,
+       "learning_rate": 0.000218,
+       "loss": 1.8056,
+       "step": 110
+     },
+     {
+       "epoch": 0.7361963190184049,
+       "grad_norm": 0.9476116299629211,
+       "learning_rate": 0.00023799999999999998,
+       "loss": 1.7621,
+       "step": 120
+     },
+     {
+       "epoch": 0.7975460122699386,
+       "grad_norm": 1.154313564300537,
+       "learning_rate": 0.00025800000000000004,
+       "loss": 1.7556,
+       "step": 130
+     },
+     {
+       "epoch": 0.8588957055214724,
+       "grad_norm": 0.7778469920158386,
+       "learning_rate": 0.00027800000000000004,
+       "loss": 1.7215,
+       "step": 140
+     },
+     {
+       "epoch": 0.9202453987730062,
+       "grad_norm": 0.8308514356613159,
+       "learning_rate": 0.000298,
+       "loss": 1.6761,
+       "step": 150
+     },
+     {
+       "epoch": 0.9815950920245399,
+       "grad_norm": 1.156718373298645,
+       "learning_rate": 0.00031800000000000003,
+       "loss": 1.6865,
+       "step": 160
+     },
+     {
+       "epoch": 1.0,
+       "eval_gen_len": 187.0146,
+       "eval_loss": 1.5963470935821533,
+       "eval_rouge1": 0.6129,
+       "eval_rouge2": 0.2686,
+       "eval_rougeL": 0.4794,
+       "eval_runtime": 30.837,
+       "eval_samples_per_second": 8.885,
+       "eval_steps_per_second": 1.135,
+       "step": 163
+     },
+     {
+       "epoch": 1.0429447852760736,
+       "grad_norm": 0.8923042416572571,
+       "learning_rate": 0.00033800000000000003,
+       "loss": 1.6244,
+       "step": 170
+     },
+     {
+       "epoch": 1.1042944785276074,
+       "grad_norm": 1.0038191080093384,
+       "learning_rate": 0.000358,
+       "loss": 1.6075,
+       "step": 180
+     },
+     {
+       "epoch": 1.165644171779141,
+       "grad_norm": 0.9305262565612793,
+       "learning_rate": 0.000378,
+       "loss": 1.6722,
+       "step": 190
+     },
+     {
+       "epoch": 1.2269938650306749,
+       "grad_norm": 1.2464691400527954,
+       "learning_rate": 0.000398,
+       "loss": 1.5723,
+       "step": 200
+     },
+     {
+       "epoch": 1.2883435582822087,
+       "grad_norm": 1.017632246017456,
+       "learning_rate": 0.00041799999999999997,
+       "loss": 1.5711,
+       "step": 210
+     },
+     {
+       "epoch": 1.3496932515337423,
+       "grad_norm": 1.2981927394866943,
+       "learning_rate": 0.000438,
+       "loss": 1.5017,
+       "step": 220
+     },
+     {
+       "epoch": 1.4110429447852761,
+       "grad_norm": 0.7919584512710571,
+       "learning_rate": 0.000458,
+       "loss": 1.5671,
+       "step": 230
+     },
+     {
+       "epoch": 1.4723926380368098,
+       "grad_norm": 1.0223811864852905,
+       "learning_rate": 0.00047799999999999996,
+       "loss": 1.5162,
+       "step": 240
+     },
+     {
+       "epoch": 1.5337423312883436,
+       "grad_norm": 0.8784969449043274,
+       "learning_rate": 0.000498,
+       "loss": 1.5689,
+       "step": 250
+     },
+     {
+       "epoch": 1.5950920245398774,
+       "grad_norm": 0.8144865036010742,
+       "learning_rate": 0.000518,
+       "loss": 1.6243,
+       "step": 260
+     },
+     {
+       "epoch": 1.656441717791411,
+       "grad_norm": 0.846225380897522,
+       "learning_rate": 0.0005380000000000001,
+       "loss": 1.5596,
+       "step": 270
+     },
+     {
+       "epoch": 1.7177914110429446,
+       "grad_norm": 0.9448590278625488,
+       "learning_rate": 0.000558,
+       "loss": 1.399,
+       "step": 280
+     },
+     {
+       "epoch": 1.7791411042944785,
+       "grad_norm": 0.9131848812103271,
+       "learning_rate": 0.000578,
+       "loss": 1.6336,
+       "step": 290
+     },
+     {
+       "epoch": 1.8404907975460123,
+       "grad_norm": 0.8644697070121765,
+       "learning_rate": 0.000598,
+       "loss": 1.6112,
+       "step": 300
+     },
+     {
+       "epoch": 1.9018404907975461,
+       "grad_norm": 0.964495062828064,
+       "learning_rate": 0.0006180000000000001,
+       "loss": 1.5901,
+       "step": 310
+     },
+     {
+       "epoch": 1.9631901840490797,
+       "grad_norm": 0.8454228043556213,
+       "learning_rate": 0.000638,
+       "loss": 1.609,
+       "step": 320
+     },
+     {
+       "epoch": 2.0,
+       "eval_gen_len": 187.0146,
+       "eval_loss": 1.5841516256332397,
+       "eval_rouge1": 0.6218,
+       "eval_rouge2": 0.2773,
+       "eval_rougeL": 0.4814,
+       "eval_runtime": 30.1539,
+       "eval_samples_per_second": 9.087,
+       "eval_steps_per_second": 1.161,
+       "step": 326
+     }
+   ],
+   "logging_steps": 10,
+   "max_steps": 815,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 5,
+   "save_steps": 500,
+   "stateful_callbacks": {
+     "EarlyStoppingCallback": {
+       "args": {
+         "early_stopping_patience": 2,
+         "early_stopping_threshold": 0.0
+       },
+       "attributes": {
+         "early_stopping_patience_counter": 0
+       }
+     },
+     "TrainerControl": {
+       "args": {
+         "should_epoch_stop": false,
+         "should_evaluate": false,
+         "should_log": false,
+         "should_save": true,
+         "should_training_stop": false
+       },
+       "attributes": {}
+     }
+   },
+   "total_flos": 1780369116364800.0,
+   "train_batch_size": 8,
+   "trial_name": null,
+   "trial_params": null
+ }
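
trainer_state.json records the training log so far: 2 of a planned 5 epochs (max_steps 815), per-epoch evaluation, and early stopping on eval_loss with patience 2. A short sketch that replays the log and confirms the best checkpoint, assuming the file sits in the working directory:

```python
# Read the log above, list the per-epoch eval metrics, and recover the best
# step by eval_loss (it should agree with best_model_checkpoint).
import json

with open("trainer_state.json") as f:
    state = json.load(f)

evals = [e for e in state["log_history"] if "eval_loss" in e]
for e in evals:
    print(f"epoch {e['epoch']:.0f}: eval_loss={e['eval_loss']:.4f} "
          f"rouge1={e['eval_rouge1']:.4f} rougeL={e['eval_rougeL']:.4f}")

best = min(evals, key=lambda e: e["eval_loss"])
print("best step:", best["step"], "eval_loss:", best["eval_loss"])
```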
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7754bd4e83bed7b96855efa35789508d4de02653b5f8d39fee4dc8e79aa7cef3
+ size 5240
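
training_args.bin is typically the serialized TrainingArguments object the Trainer saves alongside its checkpoints. A hedged inspection sketch; this assumes the file was written with torch.save as usual, and a compatible transformers install is needed because the pickle references its classes:

```python
# Hedged sketch: inspect the saved training arguments. weights_only=False is
# required because this is a pickled Python object, so only load trusted files.
import torch

args = torch.load("training_args.bin", weights_only=False)
print(args.learning_rate, args.per_device_train_batch_size, args.num_train_epochs)
```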