Model save
Browse files
- README.md +68 -0
- all_results.json +7 -0
- train_results.json +7 -0
- trainer_state.json +96 -0
README.md
ADDED
@@ -0,0 +1,68 @@
+---
+base_model: deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B
+library_name: transformers
+model_name: Qwen-1.5B-Distill-GRPO
+tags:
+- generated_from_trainer
+- trl
+- grpo
+license: license
+---
+
+# Model Card for Qwen-1.5B-Distill-GRPO
+
+This model is a fine-tuned version of [deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B](https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B).
+It has been trained using [TRL](https://github.com/huggingface/trl).
+
+## Quick start
+
+```python
+from transformers import pipeline
+
+question = "If you had a time machine, but could only go to the past or the future once and never return, which would you choose and why?"
+generator = pipeline("text-generation", model="rasdani/Qwen-1.5B-Distill-GRPO", device="cuda")
+output = generator([{"role": "user", "content": question}], max_new_tokens=128, return_full_text=False)[0]
+print(output["generated_text"])
+```
+
+## Training procedure
+
+[<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="150" height="24"/>](https://wandb.ai/daniel-a/huggingface/runs/9yo9wwlb)
+
+
+This model was trained with GRPO, a method introduced in [DeepSeekMath: Pushing the Limits of Mathematical Reasoning in Open Language Models](https://huggingface.co/papers/2402.03300).
+
+### Framework versions
+
+- TRL: 0.16.0
+- Transformers: 4.50.2
+- Pytorch: 2.6.0
+- Datasets: 3.5.0
+- Tokenizers: 0.21.1
+
+## Citations
+
+Cite GRPO as:
+
+```bibtex
+@article{zhihong2024deepseekmath,
+    title        = {{DeepSeekMath: Pushing the Limits of Mathematical Reasoning in Open Language Models}},
+    author       = {Zhihong Shao and Peiyi Wang and Qihao Zhu and Runxin Xu and Junxiao Song and Mingchuan Zhang and Y. K. Li and Y. Wu and Daya Guo},
+    year         = 2024,
+    eprint       = {arXiv:2402.03300},
+}
+
+```
+
+Cite TRL as:
+
+```bibtex
+@misc{vonwerra2022trl,
+    title        = {{TRL: Transformer Reinforcement Learning}},
+    author       = {Leandro von Werra and Younes Belkada and Lewis Tunstall and Edward Beeching and Tristan Thrush and Nathan Lambert and Shengyi Huang and Kashif Rasul and Quentin Gallouédec},
+    year         = 2020,
+    journal      = {GitHub repository},
+    publisher    = {GitHub},
+    howpublished = {\url{https://github.com/huggingface/trl}}
+}
+```
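For readers reconstructing the setup: the card above names only GRPO and TRL 0.16.0, while `trainer_state.json` below logs two reward components, `rewards/_accuracy_reward` and `rewards/_format_reward`. A minimal sketch of such a run with TRL's `GRPOTrainer` follows; the dataset and the reward-function bodies are illustrative assumptions, not the actual training script.

```python
# Hypothetical GRPO training sketch (TRL 0.16 API); dataset and reward logic are assumptions.
from datasets import load_dataset
from trl import GRPOConfig, GRPOTrainer

# Any dataset with a "prompt" column works; this one is purely illustrative.
dataset = load_dataset("trl-lib/tldr", split="train")

def accuracy_reward(completions, **kwargs):
    # Placeholder: the real run scored task accuracy; here, a dummy per-completion score.
    return [1.0 if len(c) > 0 else 0.0 for c in completions]

def format_reward(completions, **kwargs):
    # Placeholder: the real run checked an output format (this component logged 0.0 throughout).
    return [0.0 for _ in completions]

config = GRPOConfig(
    output_dir="Qwen-1.5B-Distill-GRPO",
    per_device_train_batch_size=8,  # matches "train_batch_size" in trainer_state.json
    logging_steps=10,               # matches "logging_steps"
    num_train_epochs=1,             # matches "num_train_epochs"
)

trainer = GRPOTrainer(
    model="deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
    reward_funcs=[accuracy_reward, format_reward],
    args=config,
    train_dataset=dataset,
)
trainer.train()
```

GRPO samples several completions per prompt and normalizes rewards within each group, which is why the state file tracks `reward_std` alongside the mean reward.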
all_results.json
ADDED
@@ -0,0 +1,7 @@
+{
+  "total_flos": 0.0,
+  "train_loss": 0.0035367666953994383,
+  "train_runtime": 25807.2718,
+  "train_samples_per_second": 0.039,
+  "train_steps_per_second": 0.001
+}
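The throughput numbers above are internally consistent and easy to sanity-check with plain arithmetic:

```python
import json

# Derive human-readable figures from the reported values.
with open("all_results.json") as f:
    results = json.load(f)

runtime_h = results["train_runtime"] / 3600
n_samples = results["train_runtime"] * results["train_samples_per_second"]
print(f"runtime: {runtime_h:.1f} h, samples: ~{n_samples:.0f}")
# -> runtime: 7.2 h, samples: ~1006 (roughly 32 prompts per optimizer step over 31 steps)
```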
train_results.json
ADDED
@@ -0,0 +1,7 @@
+{
+  "total_flos": 0.0,
+  "train_loss": 0.0035367666953994383,
+  "train_runtime": 25807.2718,
+  "train_samples_per_second": 0.039,
+  "train_steps_per_second": 0.001
+}
trainer_state.json
ADDED
@@ -0,0 +1,96 @@
+{
+  "best_global_step": null,
+  "best_metric": null,
+  "best_model_checkpoint": null,
+  "epoch": 0.992,
+  "eval_steps": 500,
+  "global_step": 31,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "clip_ratio": 0.0,
+      "completion_length": 503.94140625,
+      "epoch": 0.32,
+      "grad_norm": 0.055206917226314545,
+      "kl": 0.0007647275924682617,
+      "learning_rate": 1.5289640103269626e-05,
+      "loss": -0.0178,
+      "num_tokens": 837177.0,
+      "reward": 0.14900912609314218,
+      "reward_std": 0.23200407949070723,
+      "rewards/_accuracy_reward": 0.14900912609314218,
+      "rewards/_format_reward": 0.0,
+      "step": 10
+    },
+    {
+      "clip_ratio": 0.0,
+      "completion_length": 513.55390625,
+      "epoch": 0.64,
+      "grad_norm": 0.048017717897892,
+      "kl": 0.0019702911376953125,
+      "learning_rate": 5.5960584844236565e-06,
+      "loss": 0.0085,
+      "num_tokens": 1686614.0,
+      "reward": 0.20446404698841433,
+      "reward_std": 0.26745549326642504,
+      "rewards/_accuracy_reward": 0.20446404698841433,
+      "rewards/_format_reward": 0.0,
+      "step": 20
+    },
+    {
+      "clip_ratio": 0.0,
+      "completion_length": 525.440625,
+      "epoch": 0.96,
+      "grad_norm": 0.049744077026844025,
+      "kl": 0.002144050598144531,
+      "learning_rate": 5.1306766081048456e-08,
+      "loss": 0.0187,
+      "num_tokens": 2550850.0,
+      "reward": 0.23339965324412332,
+      "reward_std": 0.3011114658700535,
+      "rewards/_accuracy_reward": 0.23339965324412332,
+      "rewards/_format_reward": 0.0,
+      "step": 30
+    },
+    {
+      "clip_ratio": 0.0,
+      "completion_length": 546.765625,
+      "epoch": 0.992,
+      "kl": 0.0019164085388183594,
+      "num_tokens": 2640088.0,
+      "reward": 0.1563290636986494,
+      "reward_std": 0.2728244187310338,
+      "rewards/_accuracy_reward": 0.1563290636986494,
+      "rewards/_format_reward": 0.0,
+      "step": 31,
+      "total_flos": 0.0,
+      "train_loss": 0.0035367666953994383,
+      "train_runtime": 25807.2718,
+      "train_samples_per_second": 0.039,
+      "train_steps_per_second": 0.001
+    }
+  ],
+  "logging_steps": 10,
+  "max_steps": 31,
+  "num_input_tokens_seen": 0,
+  "num_train_epochs": 1,
+  "save_steps": 500,
+  "stateful_callbacks": {
+    "TrainerControl": {
+      "args": {
+        "should_epoch_stop": false,
+        "should_evaluate": false,
+        "should_log": false,
+        "should_save": true,
+        "should_training_stop": true
+      },
+      "attributes": {}
+    }
+  },
+  "total_flos": 0.0,
+  "train_batch_size": 8,
+  "trial_name": null,
+  "trial_params": null
+}
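Each `log_history` entry above is a flat dict keyed by metric name, so the reward trajectory can be pulled out with the standard library alone; a small sketch:

```python
import json

with open("trainer_state.json") as f:
    state = json.load(f)

# Every logged step records the aggregate reward plus its per-function components.
for entry in state["log_history"]:
    step = entry["step"]
    reward = entry["reward"]
    acc = entry["rewards/_accuracy_reward"]
    fmt = entry["rewards/_format_reward"]
    print(f"step {step:>3}: reward={reward:.4f} (accuracy={acc:.4f}, format={fmt:.4f})")
```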