manu committed on
Commit
0ce7b2e
·
1 Parent(s): 4a4faf1

End of training

Browse files
Files changed (4) hide show
  1. README.md +3 -1
  2. all_results.json +6 -6
  3. train_results.json +6 -6
  4. trainer_state.json +88 -10
README.md CHANGED
@@ -2,6 +2,8 @@
2
  base_model: mock_training_run/llama_configs/config.json
3
  tags:
4
  - generated_from_trainer
 
 
5
  model-index:
6
  - name: llama-wikitext
7
  results: []
@@ -12,7 +14,7 @@ should probably proofread and complete it, then remove this comment. -->
12
 
13
  # llama-wikitext
14
 
15
- This model is a fine-tuned version of [mock_training_run/llama_configs/config.json](https://huggingface.co/mock_training_run/llama_configs/config.json) on the None dataset.
16
 
17
  ## Model description
18
 
 
2
  base_model: mock_training_run/llama_configs/config.json
3
  tags:
4
  - generated_from_trainer
5
+ datasets:
6
+ - wikitext
7
  model-index:
8
  - name: llama-wikitext
9
  results: []
 
14
 
15
  # llama-wikitext
16
 
17
+ This model is a fine-tuned version of [mock_training_run/llama_configs/config.json](https://huggingface.co/mock_training_run/llama_configs/config.json) on the wikitext wikitext-103-v1 dataset.
18
 
19
  ## Model description
20
 
all_results.json CHANGED
@@ -1,8 +1,8 @@
1
  {
2
- "epoch": 0.76,
3
- "train_loss": 10.722885131835938,
4
- "train_runtime": 23.7585,
5
- "train_samples": 1343,
6
- "train_samples_per_second": 56.527,
7
- "train_steps_per_second": 0.084
8
  }
 
1
  {
2
+ "epoch": 1.0,
3
+ "train_loss": 5.974732427886038,
4
+ "train_runtime": 1543.433,
5
+ "train_samples": 67643,
6
+ "train_samples_per_second": 43.826,
7
+ "train_steps_per_second": 0.086
8
  }
train_results.json CHANGED
@@ -1,8 +1,8 @@
1
  {
2
- "epoch": 0.76,
3
- "train_loss": 10.722885131835938,
4
- "train_runtime": 23.7585,
5
- "train_samples": 1343,
6
- "train_samples_per_second": 56.527,
7
- "train_steps_per_second": 0.084
8
  }
 
1
  {
2
+ "epoch": 1.0,
3
+ "train_loss": 5.974732427886038,
4
+ "train_runtime": 1543.433,
5
+ "train_samples": 67643,
6
+ "train_samples_per_second": 43.826,
7
+ "train_steps_per_second": 0.086
8
  }
trainer_state.json CHANGED
@@ -1,28 +1,106 @@
1
  {
2
  "best_metric": null,
3
  "best_model_checkpoint": null,
4
- "epoch": 0.7619047619047619,
5
  "eval_steps": 100.0,
6
- "global_step": 2,
7
  "is_hyper_param_search": false,
8
  "is_local_process_zero": true,
9
  "is_world_process_zero": true,
10
  "log_history": [
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
11
  {
12
  "epoch": 0.76,
13
- "step": 2,
14
- "total_flos": 4916124793700352.0,
15
- "train_loss": 10.722885131835938,
16
- "train_runtime": 23.7585,
17
- "train_samples_per_second": 56.527,
18
- "train_steps_per_second": 0.084
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
19
  }
20
  ],
21
  "logging_steps": 10,
22
- "max_steps": 2,
23
  "num_train_epochs": 1,
24
  "save_steps": 100,
25
- "total_flos": 4916124793700352.0,
26
  "trial_name": null,
27
  "trial_params": null
28
  }
 
1
  {
2
  "best_metric": null,
3
  "best_model_checkpoint": null,
4
+ "epoch": 0.9990539262062441,
5
  "eval_steps": 100.0,
6
+ "global_step": 132,
7
  "is_hyper_param_search": false,
8
  "is_local_process_zero": true,
9
  "is_world_process_zero": true,
10
  "log_history": [
11
+ {
12
+ "epoch": 0.08,
13
+ "learning_rate": 0.00029279999999999996,
14
+ "loss": 9.0737,
15
+ "step": 10
16
+ },
17
+ {
18
+ "epoch": 0.15,
19
+ "learning_rate": 0.0002688,
20
+ "loss": 7.1389,
21
+ "step": 20
22
+ },
23
+ {
24
+ "epoch": 0.23,
25
+ "learning_rate": 0.0002448,
26
+ "loss": 6.6649,
27
+ "step": 30
28
+ },
29
+ {
30
+ "epoch": 0.3,
31
+ "learning_rate": 0.00022079999999999997,
32
+ "loss": 6.3274,
33
+ "step": 40
34
+ },
35
+ {
36
+ "epoch": 0.38,
37
+ "learning_rate": 0.00019679999999999999,
38
+ "loss": 6.0455,
39
+ "step": 50
40
+ },
41
+ {
42
+ "epoch": 0.45,
43
+ "learning_rate": 0.00017279999999999997,
44
+ "loss": 5.7722,
45
+ "step": 60
46
+ },
47
+ {
48
+ "epoch": 0.53,
49
+ "learning_rate": 0.00014879999999999998,
50
+ "loss": 5.6097,
51
+ "step": 70
52
+ },
53
+ {
54
+ "epoch": 0.61,
55
+ "learning_rate": 0.00012479999999999997,
56
+ "loss": 5.4476,
57
+ "step": 80
58
+ },
59
+ {
60
+ "epoch": 0.68,
61
+ "learning_rate": 0.0001008,
62
+ "loss": 5.3298,
63
+ "step": 90
64
+ },
65
  {
66
  "epoch": 0.76,
67
+ "learning_rate": 7.68e-05,
68
+ "loss": 5.2202,
69
+ "step": 100
70
+ },
71
+ {
72
+ "epoch": 0.83,
73
+ "learning_rate": 5.279999999999999e-05,
74
+ "loss": 5.1345,
75
+ "step": 110
76
+ },
77
+ {
78
+ "epoch": 0.91,
79
+ "learning_rate": 2.88e-05,
80
+ "loss": 5.067,
81
+ "step": 120
82
+ },
83
+ {
84
+ "epoch": 0.98,
85
+ "learning_rate": 4.8e-06,
86
+ "loss": 5.0283,
87
+ "step": 130
88
+ },
89
+ {
90
+ "epoch": 1.0,
91
+ "step": 132,
92
+ "total_flos": 3.244642363842232e+17,
93
+ "train_loss": 5.974732427886038,
94
+ "train_runtime": 1543.433,
95
+ "train_samples_per_second": 43.826,
96
+ "train_steps_per_second": 0.086
97
  }
98
  ],
99
  "logging_steps": 10,
100
+ "max_steps": 132,
101
  "num_train_epochs": 1,
102
  "save_steps": 100,
103
+ "total_flos": 3.244642363842232e+17,
104
  "trial_name": null,
105
  "trial_params": null
106
  }