lucasrct committed on
Commit
43b22c8
·
verified ·
1 Parent(s): dee96d4

Upload folder using huggingface_hub

Browse files
README.md ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # sentiment_prompt_1_model
2
+
3
+ ## Model Description
4
+ This model is a fine-tuned version of `EleutherAI/pythia-410m` trained on `dair-ai/emotion` data.
5
+
6
+ ## Dataset Details
7
+ - Dataset Configuration: unsplit
8
+ - Dataset Name: dair-ai/emotion
9
+ - Prompt: analyze the sentiment of the given text: {text}
10
+ {label}
11
+
12
+ ## Training Details
13
+ - Base Model: EleutherAI/pythia-410m
14
+ - Training Parameters:
15
+ - Learning Rate: 2e-05
16
+ - Batch Size: 1
17
+ - Epochs: 1
config.json ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_name_or_path": "EleutherAI/pythia-410m",
3
+ "architectures": [
4
+ "GPTNeoXForCausalLM"
5
+ ],
6
+ "attention_bias": true,
7
+ "attention_dropout": 0.0,
8
+ "bos_token_id": 0,
9
+ "classifier_dropout": 0.1,
10
+ "eos_token_id": 0,
11
+ "hidden_act": "gelu",
12
+ "hidden_dropout": 0.0,
13
+ "hidden_size": 1024,
14
+ "initializer_range": 0.02,
15
+ "intermediate_size": 4096,
16
+ "layer_norm_eps": 1e-05,
17
+ "max_position_embeddings": 2048,
18
+ "model_type": "gpt_neox",
19
+ "num_attention_heads": 16,
20
+ "num_hidden_layers": 24,
21
+ "partial_rotary_factor": 0.25,
22
+ "rope_scaling": null,
23
+ "rope_theta": 10000,
24
+ "rotary_emb_base": 10000,
25
+ "rotary_pct": 0.25,
26
+ "tie_word_embeddings": false,
27
+ "torch_dtype": "bfloat16",
28
+ "transformers_version": "4.48.0",
29
+ "use_cache": true,
30
+ "use_parallel_residual": true,
31
+ "vocab_size": 50304
32
+ }
generation_config.json ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ {
2
+ "_from_model_config": true,
3
+ "bos_token_id": 0,
4
+ "eos_token_id": 0,
5
+ "transformers_version": "4.48.0"
6
+ }
gpu_memory_log.txt ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ Current GPU Memory allocated: 2.37 GB
2
+ Max GPU Memory allocated: 4.04 GB
model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:94e86c95eeb62dd0ff8cd8c14221d12bdf8db01bed58e1dd0c59240867d94769
3
+ size 810702192
optimizer.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:35f504e1e3f33d7af4d5bb5f71703aca9b5a740ccb117c796d65229a17ccb6f1
3
+ size 1621580026
ram_usage_log.txt ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ Total RAM: 186.68 GB
2
+ Available RAM: 155.76 GB
3
+ Used RAM: 28.88 GB
4
+ RAM Usage Percentage: 16.6%
rng_state.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:386fcc8cc1089aade9450d86fb239ea3483f455fd2d78d8378645feecfec9d69
3
+ size 14244
scheduler.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3fd330c71330ca5dfdd289cbd452c1e86b567510cb45bbbeadf00e962e2b0d1b
3
+ size 1064
trainer_state.json ADDED
@@ -0,0 +1,201 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "best_metric": null,
3
+ "best_model_checkpoint": null,
4
+ "epoch": 1.0,
5
+ "eval_steps": 500,
6
+ "global_step": 24750,
7
+ "is_hyper_param_search": false,
8
+ "is_local_process_zero": true,
9
+ "is_world_process_zero": true,
10
+ "log_history": [
11
+ {
12
+ "epoch": 0.04040404040404041,
13
+ "grad_norm": 3.28125,
14
+ "learning_rate": 1.9587628865979382e-05,
15
+ "loss": 1.1421,
16
+ "step": 1000
17
+ },
18
+ {
19
+ "epoch": 0.08080808080808081,
20
+ "grad_norm": 2.234375,
21
+ "learning_rate": 1.8762886597938147e-05,
22
+ "loss": 0.153,
23
+ "step": 2000
24
+ },
25
+ {
26
+ "epoch": 0.12121212121212122,
27
+ "grad_norm": 1.78125,
28
+ "learning_rate": 1.793814432989691e-05,
29
+ "loss": 0.1516,
30
+ "step": 3000
31
+ },
32
+ {
33
+ "epoch": 0.16161616161616163,
34
+ "grad_norm": 1.421875,
35
+ "learning_rate": 1.7113402061855672e-05,
36
+ "loss": 0.1508,
37
+ "step": 4000
38
+ },
39
+ {
40
+ "epoch": 0.20202020202020202,
41
+ "grad_norm": 1.9296875,
42
+ "learning_rate": 1.6288659793814433e-05,
43
+ "loss": 0.149,
44
+ "step": 5000
45
+ },
46
+ {
47
+ "epoch": 0.24242424242424243,
48
+ "grad_norm": 1.4453125,
49
+ "learning_rate": 1.5463917525773197e-05,
50
+ "loss": 0.147,
51
+ "step": 6000
52
+ },
53
+ {
54
+ "epoch": 0.2828282828282828,
55
+ "grad_norm": 1.8203125,
56
+ "learning_rate": 1.4639175257731958e-05,
57
+ "loss": 0.1473,
58
+ "step": 7000
59
+ },
60
+ {
61
+ "epoch": 0.32323232323232326,
62
+ "grad_norm": 0.94140625,
63
+ "learning_rate": 1.3814432989690723e-05,
64
+ "loss": 0.1459,
65
+ "step": 8000
66
+ },
67
+ {
68
+ "epoch": 0.36363636363636365,
69
+ "grad_norm": 1.984375,
70
+ "learning_rate": 1.2989690721649485e-05,
71
+ "loss": 0.1463,
72
+ "step": 9000
73
+ },
74
+ {
75
+ "epoch": 0.40404040404040403,
76
+ "grad_norm": 1.9375,
77
+ "learning_rate": 1.2164948453608248e-05,
78
+ "loss": 0.1444,
79
+ "step": 10000
80
+ },
81
+ {
82
+ "epoch": 0.4444444444444444,
83
+ "grad_norm": 1.3984375,
84
+ "learning_rate": 1.134020618556701e-05,
85
+ "loss": 0.1458,
86
+ "step": 11000
87
+ },
88
+ {
89
+ "epoch": 0.48484848484848486,
90
+ "grad_norm": 1.734375,
91
+ "learning_rate": 1.0515463917525775e-05,
92
+ "loss": 0.1446,
93
+ "step": 12000
94
+ },
95
+ {
96
+ "epoch": 0.5252525252525253,
97
+ "grad_norm": 1.25,
98
+ "learning_rate": 9.690721649484536e-06,
99
+ "loss": 0.1454,
100
+ "step": 13000
101
+ },
102
+ {
103
+ "epoch": 0.5656565656565656,
104
+ "grad_norm": 2.65625,
105
+ "learning_rate": 8.865979381443299e-06,
106
+ "loss": 0.1454,
107
+ "step": 14000
108
+ },
109
+ {
110
+ "epoch": 0.6060606060606061,
111
+ "grad_norm": 1.515625,
112
+ "learning_rate": 8.041237113402063e-06,
113
+ "loss": 0.1456,
114
+ "step": 15000
115
+ },
116
+ {
117
+ "epoch": 0.6464646464646465,
118
+ "grad_norm": 1.328125,
119
+ "learning_rate": 7.216494845360825e-06,
120
+ "loss": 0.1469,
121
+ "step": 16000
122
+ },
123
+ {
124
+ "epoch": 0.6868686868686869,
125
+ "grad_norm": 1.296875,
126
+ "learning_rate": 6.391752577319588e-06,
127
+ "loss": 0.1466,
128
+ "step": 17000
129
+ },
130
+ {
131
+ "epoch": 0.7272727272727273,
132
+ "grad_norm": 0.91015625,
133
+ "learning_rate": 5.567010309278351e-06,
134
+ "loss": 0.147,
135
+ "step": 18000
136
+ },
137
+ {
138
+ "epoch": 0.7676767676767676,
139
+ "grad_norm": 1.90625,
140
+ "learning_rate": 4.742268041237113e-06,
141
+ "loss": 0.1477,
142
+ "step": 19000
143
+ },
144
+ {
145
+ "epoch": 0.8080808080808081,
146
+ "grad_norm": 1.3359375,
147
+ "learning_rate": 3.917525773195877e-06,
148
+ "loss": 0.1461,
149
+ "step": 20000
150
+ },
151
+ {
152
+ "epoch": 0.8484848484848485,
153
+ "grad_norm": 1.625,
154
+ "learning_rate": 3.0927835051546395e-06,
155
+ "loss": 0.1485,
156
+ "step": 21000
157
+ },
158
+ {
159
+ "epoch": 0.8888888888888888,
160
+ "grad_norm": 1.3671875,
161
+ "learning_rate": 2.268041237113402e-06,
162
+ "loss": 0.1473,
163
+ "step": 22000
164
+ },
165
+ {
166
+ "epoch": 0.9292929292929293,
167
+ "grad_norm": 1.2578125,
168
+ "learning_rate": 1.4432989690721649e-06,
169
+ "loss": 0.1463,
170
+ "step": 23000
171
+ },
172
+ {
173
+ "epoch": 0.9696969696969697,
174
+ "grad_norm": 1.1875,
175
+ "learning_rate": 6.185567010309279e-07,
176
+ "loss": 0.1461,
177
+ "step": 24000
178
+ }
179
+ ],
180
+ "logging_steps": 1000,
181
+ "max_steps": 24750,
182
+ "num_input_tokens_seen": 0,
183
+ "num_train_epochs": 1,
184
+ "save_steps": 1000,
185
+ "stateful_callbacks": {
186
+ "TrainerControl": {
187
+ "args": {
188
+ "should_epoch_stop": false,
189
+ "should_evaluate": false,
190
+ "should_log": false,
191
+ "should_save": true,
192
+ "should_training_stop": true
193
+ },
194
+ "attributes": {}
195
+ }
196
+ },
197
+ "total_flos": 1.0760739618816e+17,
198
+ "train_batch_size": 1,
199
+ "trial_name": null,
200
+ "trial_params": null
201
+ }
training_args.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d7eaf1a6dbfa8bdd6295b8ad03c703ba7f38d2d2d0ecf415cdca68154d832532
3
+ size 5304
training_config.yaml ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ model_name: "EleutherAI/pythia-410m"
2
+ learning_rate: 2.0e-5
3
+ batch_size: 1
4
+ num_epochs: 1
5
+ warmup_steps: 500
6
+ logging_steps: 1000
7
+ # eval_steps: 2000
8
+ save_steps: 1000
9
+ gradient_accumulation_steps: 4