HarryJoshAI commited on
Commit
93f34b7
·
verified ·
1 Parent(s): c66c242

Upload folder using huggingface_hub

Browse files
Files changed (39) hide show
  1. 01/final/added_tokens.json +3 -0
  2. 01/final/config.json +39 -0
  3. 01/final/generation_config.json +6 -0
  4. 01/final/logs/trl/1750112156.9130752/events.out.tfevents.1750112156.ip-10-192-12-175.5415.1 +3 -0
  5. 01/final/logs/trl/1750112156.9143343/hparams.yml +46 -0
  6. 01/final/logs/trl/1750112185.023031/events.out.tfevents.1750112185.ip-10-192-12-175.5719.1 +3 -0
  7. 01/final/logs/trl/1750112185.024333/hparams.yml +46 -0
  8. 01/final/logs/trl/1750112240.7612593/events.out.tfevents.1750112240.ip-10-192-12-175.6494.1 +3 -0
  9. 01/final/logs/trl/1750112240.7625916/hparams.yml +46 -0
  10. 01/final/logs/trl/1750112276.7026455/events.out.tfevents.1750112276.ip-10-192-12-175.6841.1 +3 -0
  11. 01/final/logs/trl/1750112276.7038748/hparams.yml +46 -0
  12. 01/final/logs/trl/1750112333.4929345/events.out.tfevents.1750112333.ip-10-192-12-175.7509.1 +3 -0
  13. 01/final/logs/trl/1750112333.4942954/hparams.yml +46 -0
  14. 01/final/logs/trl/1750112360.9809108/events.out.tfevents.1750112360.ip-10-192-12-175.7831.1 +3 -0
  15. 01/final/logs/trl/1750112360.9823127/hparams.yml +46 -0
  16. 01/final/logs/trl/1750112406.541964/events.out.tfevents.1750112406.ip-10-192-12-175.8243.1 +3 -0
  17. 01/final/logs/trl/1750112406.5431876/hparams.yml +46 -0
  18. 01/final/logs/trl/1750112449.5937607/events.out.tfevents.1750112449.ip-10-192-12-175.8863.1 +3 -0
  19. 01/final/logs/trl/1750112449.5949225/hparams.yml +46 -0
  20. 01/final/logs/trl/1750112502.7784708/events.out.tfevents.1750112502.ip-10-192-12-175.9534.1 +3 -0
  21. 01/final/logs/trl/1750112502.779761/hparams.yml +46 -0
  22. 01/final/logs/trl/events.out.tfevents.1750112156.ip-10-192-12-175.5415.0 +3 -0
  23. 01/final/logs/trl/events.out.tfevents.1750112185.ip-10-192-12-175.5719.0 +3 -0
  24. 01/final/logs/trl/events.out.tfevents.1750112240.ip-10-192-12-175.6494.0 +3 -0
  25. 01/final/logs/trl/events.out.tfevents.1750112276.ip-10-192-12-175.6841.0 +3 -0
  26. 01/final/logs/trl/events.out.tfevents.1750112333.ip-10-192-12-175.7509.0 +3 -0
  27. 01/final/logs/trl/events.out.tfevents.1750112360.ip-10-192-12-175.7831.0 +3 -0
  28. 01/final/logs/trl/events.out.tfevents.1750112406.ip-10-192-12-175.8243.0 +3 -0
  29. 01/final/logs/trl/events.out.tfevents.1750112449.ip-10-192-12-175.8863.0 +3 -0
  30. 01/final/logs/trl/events.out.tfevents.1750112502.ip-10-192-12-175.9534.0 +3 -0
  31. 01/final/merges.txt +0 -0
  32. 01/final/model.safetensors +3 -0
  33. 01/final/special_tokens_map.json +24 -0
  34. 01/final/tokenizer.json +0 -0
  35. 01/final/tokenizer_config.json +23 -0
  36. 01/final/vocab.json +0 -0
  37. README.md +2 -8
  38. chatbot_ui.py +83 -0
  39. requirements.txt +14 -0
01/final/added_tokens.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ {
2
+ "<|endoftext|>": 28917
3
+ }
01/final/config.json ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_name_or_path": "01/medical_model/final",
3
+ "activation_function": "gelu_new",
4
+ "architectures": [
5
+ "GPT2LMHeadModel"
6
+ ],
7
+ "attn_pdrop": 0.1,
8
+ "bos_token_id": 50256,
9
+ "embd_pdrop": 0.1,
10
+ "eos_token_id": 50256,
11
+ "initializer_range": 0.02,
12
+ "layer_norm_epsilon": 1e-05,
13
+ "model_type": "gpt2",
14
+ "n_ctx": 128,
15
+ "n_embd": 512,
16
+ "n_head": 8,
17
+ "n_inner": null,
18
+ "n_layer": 6,
19
+ "n_positions": 128,
20
+ "reorder_and_upcast_attn": false,
21
+ "resid_pdrop": 0.1,
22
+ "scale_attn_by_inverse_layer_idx": false,
23
+ "scale_attn_weights": true,
24
+ "summary_activation": null,
25
+ "summary_first_dropout": 0.1,
26
+ "summary_proj_to_labels": true,
27
+ "summary_type": "cls_index",
28
+ "summary_use_proj": true,
29
+ "task_specific_params": {
30
+ "text-generation": {
31
+ "do_sample": true,
32
+ "max_length": 50
33
+ }
34
+ },
35
+ "torch_dtype": "float32",
36
+ "transformers_version": "4.38.2",
37
+ "use_cache": true,
38
+ "vocab_size": 28918
39
+ }
01/final/generation_config.json ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ {
2
+ "_from_model_config": true,
3
+ "bos_token_id": 50256,
4
+ "eos_token_id": 50256,
5
+ "transformers_version": "4.38.2"
6
+ }
01/final/logs/trl/1750112156.9130752/events.out.tfevents.1750112156.ip-10-192-12-175.5415.1 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4ed1e6493668bfd1f1704cc9db94b5deb01c34853b6595a1b962278c838964f8
3
+ size 2339
01/final/logs/trl/1750112156.9143343/hparams.yml ADDED
@@ -0,0 +1,46 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ adap_kl_ctrl: true
2
+ backward_batch_size: 1
3
+ batch_size: 256
4
+ cliprange: 0.2
5
+ cliprange_value: 0.2
6
+ compare_steps: 1
7
+ early_stopping: false
8
+ exp_name: train_rl
9
+ forward_batch_size: null
10
+ gamma: 1
11
+ global_backward_batch_size: 1
12
+ global_batch_size: 256
13
+ gradient_accumulation_steps: 1
14
+ gradient_checkpointing: false
15
+ horizon: 10000
16
+ init_kl_coef: 0.2
17
+ is_encoder_decoder: false
18
+ is_peft_model: false
19
+ kl_penalty: kl
20
+ lam: 0.95
21
+ learning_rate: 1.41e-05
22
+ log_with: tensorboard
23
+ max_grad_norm: null
24
+ mini_batch_size: 1
25
+ model_name: 01/medical_model/final
26
+ optimize_cuda_cache: null
27
+ optimize_device_cache: false
28
+ ppo_epochs: 4
29
+ project_kwargs/logging_dir: 01/medical_model_rl/final/logs
30
+ query_dataset: imdb
31
+ ratio_threshold: 10.0
32
+ remove_unused_columns: true
33
+ reward_model: sentiment-analysis:lvwerra/distilbert-imdb
34
+ score_clip: null
35
+ seed: 0
36
+ steps: 20000
37
+ target: 6
38
+ target_kl: 0.1
39
+ task_name: null
40
+ total_ppo_epochs: 79
41
+ tracker_project_name: trl
42
+ use_score_norm: false
43
+ use_score_scaling: false
44
+ vf_coef: 0.1
45
+ whiten_rewards: false
46
+ world_size: 1
01/final/logs/trl/1750112185.023031/events.out.tfevents.1750112185.ip-10-192-12-175.5719.1 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e9f8f6431a6d0af066e868a4cc9116d51911f79dad4aa62a463e34a8049fab8d
3
+ size 2339
01/final/logs/trl/1750112185.024333/hparams.yml ADDED
@@ -0,0 +1,46 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ adap_kl_ctrl: true
2
+ backward_batch_size: 1
3
+ batch_size: 32
4
+ cliprange: 0.2
5
+ cliprange_value: 0.2
6
+ compare_steps: 1
7
+ early_stopping: false
8
+ exp_name: train_rl
9
+ forward_batch_size: null
10
+ gamma: 1
11
+ global_backward_batch_size: 1
12
+ global_batch_size: 32
13
+ gradient_accumulation_steps: 1
14
+ gradient_checkpointing: false
15
+ horizon: 10000
16
+ init_kl_coef: 0.2
17
+ is_encoder_decoder: false
18
+ is_peft_model: false
19
+ kl_penalty: kl
20
+ lam: 0.95
21
+ learning_rate: 1.41e-05
22
+ log_with: tensorboard
23
+ max_grad_norm: null
24
+ mini_batch_size: 1
25
+ model_name: 01/medical_model/final
26
+ optimize_cuda_cache: null
27
+ optimize_device_cache: false
28
+ ppo_epochs: 4
29
+ project_kwargs/logging_dir: 01/medical_model_rl/final/logs
30
+ query_dataset: imdb
31
+ ratio_threshold: 10.0
32
+ remove_unused_columns: true
33
+ reward_model: sentiment-analysis:lvwerra/distilbert-imdb
34
+ score_clip: null
35
+ seed: 0
36
+ steps: 20000
37
+ target: 6
38
+ target_kl: 0.1
39
+ task_name: null
40
+ total_ppo_epochs: 625
41
+ tracker_project_name: trl
42
+ use_score_norm: false
43
+ use_score_scaling: false
44
+ vf_coef: 0.1
45
+ whiten_rewards: false
46
+ world_size: 1
01/final/logs/trl/1750112240.7612593/events.out.tfevents.1750112240.ip-10-192-12-175.6494.1 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:090079639d189f09bed464ec6388066bf58bc949dc2cb2862bd5d48e382935aa
3
+ size 2339
01/final/logs/trl/1750112240.7625916/hparams.yml ADDED
@@ -0,0 +1,46 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ adap_kl_ctrl: true
2
+ backward_batch_size: 1
3
+ batch_size: 32
4
+ cliprange: 0.2
5
+ cliprange_value: 0.2
6
+ compare_steps: 1
7
+ early_stopping: false
8
+ exp_name: train_rl
9
+ forward_batch_size: null
10
+ gamma: 1
11
+ global_backward_batch_size: 1
12
+ global_batch_size: 32
13
+ gradient_accumulation_steps: 1
14
+ gradient_checkpointing: false
15
+ horizon: 10000
16
+ init_kl_coef: 0.2
17
+ is_encoder_decoder: false
18
+ is_peft_model: false
19
+ kl_penalty: kl
20
+ lam: 0.95
21
+ learning_rate: 1.41e-05
22
+ log_with: tensorboard
23
+ max_grad_norm: null
24
+ mini_batch_size: 1
25
+ model_name: 01/medical_model/final
26
+ optimize_cuda_cache: null
27
+ optimize_device_cache: false
28
+ ppo_epochs: 4
29
+ project_kwargs/logging_dir: 01/medical_model_rl/final/logs
30
+ query_dataset: imdb
31
+ ratio_threshold: 10.0
32
+ remove_unused_columns: true
33
+ reward_model: sentiment-analysis:lvwerra/distilbert-imdb
34
+ score_clip: null
35
+ seed: 0
36
+ steps: 20000
37
+ target: 6
38
+ target_kl: 0.1
39
+ task_name: null
40
+ total_ppo_epochs: 625
41
+ tracker_project_name: trl
42
+ use_score_norm: false
43
+ use_score_scaling: false
44
+ vf_coef: 0.1
45
+ whiten_rewards: false
46
+ world_size: 1
01/final/logs/trl/1750112276.7026455/events.out.tfevents.1750112276.ip-10-192-12-175.6841.1 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:06d244f16508c6c7d36a3faa0553a99e0155d955285e37ba418f1c979167c7eb
3
+ size 2339
01/final/logs/trl/1750112276.7038748/hparams.yml ADDED
@@ -0,0 +1,46 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ adap_kl_ctrl: true
2
+ backward_batch_size: 1
3
+ batch_size: 32
4
+ cliprange: 0.2
5
+ cliprange_value: 0.2
6
+ compare_steps: 1
7
+ early_stopping: false
8
+ exp_name: train_rl
9
+ forward_batch_size: null
10
+ gamma: 1
11
+ global_backward_batch_size: 1
12
+ global_batch_size: 32
13
+ gradient_accumulation_steps: 1
14
+ gradient_checkpointing: false
15
+ horizon: 10000
16
+ init_kl_coef: 0.2
17
+ is_encoder_decoder: false
18
+ is_peft_model: false
19
+ kl_penalty: kl
20
+ lam: 0.95
21
+ learning_rate: 1.41e-05
22
+ log_with: tensorboard
23
+ max_grad_norm: null
24
+ mini_batch_size: 1
25
+ model_name: 01/medical_model/final
26
+ optimize_cuda_cache: null
27
+ optimize_device_cache: false
28
+ ppo_epochs: 4
29
+ project_kwargs/logging_dir: 01/medical_model_rl/final/logs
30
+ query_dataset: imdb
31
+ ratio_threshold: 10.0
32
+ remove_unused_columns: true
33
+ reward_model: sentiment-analysis:lvwerra/distilbert-imdb
34
+ score_clip: null
35
+ seed: 0
36
+ steps: 20000
37
+ target: 6
38
+ target_kl: 0.1
39
+ task_name: null
40
+ total_ppo_epochs: 625
41
+ tracker_project_name: trl
42
+ use_score_norm: false
43
+ use_score_scaling: false
44
+ vf_coef: 0.1
45
+ whiten_rewards: false
46
+ world_size: 1
01/final/logs/trl/1750112333.4929345/events.out.tfevents.1750112333.ip-10-192-12-175.7509.1 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7b90983b03ea1e3d6fd3f451e3feab5f6c7678c5a452546e9cf38bd20e5b6ab0
3
+ size 2339
01/final/logs/trl/1750112333.4942954/hparams.yml ADDED
@@ -0,0 +1,46 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ adap_kl_ctrl: true
2
+ backward_batch_size: 1
3
+ batch_size: 32
4
+ cliprange: 0.2
5
+ cliprange_value: 0.2
6
+ compare_steps: 1
7
+ early_stopping: false
8
+ exp_name: train_rl
9
+ forward_batch_size: null
10
+ gamma: 1
11
+ global_backward_batch_size: 1
12
+ global_batch_size: 32
13
+ gradient_accumulation_steps: 1
14
+ gradient_checkpointing: false
15
+ horizon: 10000
16
+ init_kl_coef: 0.2
17
+ is_encoder_decoder: false
18
+ is_peft_model: false
19
+ kl_penalty: kl
20
+ lam: 0.95
21
+ learning_rate: 1.41e-05
22
+ log_with: tensorboard
23
+ max_grad_norm: null
24
+ mini_batch_size: 1
25
+ model_name: 01/medical_model/final
26
+ optimize_cuda_cache: null
27
+ optimize_device_cache: false
28
+ ppo_epochs: 4
29
+ project_kwargs/logging_dir: 01/medical_model_rl/final/logs
30
+ query_dataset: imdb
31
+ ratio_threshold: 10.0
32
+ remove_unused_columns: true
33
+ reward_model: sentiment-analysis:lvwerra/distilbert-imdb
34
+ score_clip: null
35
+ seed: 0
36
+ steps: 20000
37
+ target: 6
38
+ target_kl: 0.1
39
+ task_name: null
40
+ total_ppo_epochs: 625
41
+ tracker_project_name: trl
42
+ use_score_norm: false
43
+ use_score_scaling: false
44
+ vf_coef: 0.1
45
+ whiten_rewards: false
46
+ world_size: 1
01/final/logs/trl/1750112360.9809108/events.out.tfevents.1750112360.ip-10-192-12-175.7831.1 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:dafa7d0f48494e40a5b96a334b7f5c64def2a98317d839c22cf57ba72949342b
3
+ size 2339
01/final/logs/trl/1750112360.9823127/hparams.yml ADDED
@@ -0,0 +1,46 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ adap_kl_ctrl: true
2
+ backward_batch_size: 1
3
+ batch_size: 32
4
+ cliprange: 0.2
5
+ cliprange_value: 0.2
6
+ compare_steps: 1
7
+ early_stopping: false
8
+ exp_name: train_rl
9
+ forward_batch_size: null
10
+ gamma: 1
11
+ global_backward_batch_size: 1
12
+ global_batch_size: 32
13
+ gradient_accumulation_steps: 1
14
+ gradient_checkpointing: false
15
+ horizon: 10000
16
+ init_kl_coef: 0.2
17
+ is_encoder_decoder: false
18
+ is_peft_model: false
19
+ kl_penalty: kl
20
+ lam: 0.95
21
+ learning_rate: 1.41e-05
22
+ log_with: tensorboard
23
+ max_grad_norm: null
24
+ mini_batch_size: 1
25
+ model_name: 01/medical_model/final
26
+ optimize_cuda_cache: null
27
+ optimize_device_cache: false
28
+ ppo_epochs: 4
29
+ project_kwargs/logging_dir: 01/medical_model_rl/final/logs
30
+ query_dataset: imdb
31
+ ratio_threshold: 10.0
32
+ remove_unused_columns: true
33
+ reward_model: sentiment-analysis:lvwerra/distilbert-imdb
34
+ score_clip: null
35
+ seed: 0
36
+ steps: 20000
37
+ target: 6
38
+ target_kl: 0.1
39
+ task_name: null
40
+ total_ppo_epochs: 625
41
+ tracker_project_name: trl
42
+ use_score_norm: false
43
+ use_score_scaling: false
44
+ vf_coef: 0.1
45
+ whiten_rewards: false
46
+ world_size: 1
01/final/logs/trl/1750112406.541964/events.out.tfevents.1750112406.ip-10-192-12-175.8243.1 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:462bafcf974edabb9e2e8e3741457e8d3a8a9e5729817ee3162a4bebcd11c3b1
3
+ size 2339
01/final/logs/trl/1750112406.5431876/hparams.yml ADDED
@@ -0,0 +1,46 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ adap_kl_ctrl: true
2
+ backward_batch_size: 1
3
+ batch_size: 32
4
+ cliprange: 0.2
5
+ cliprange_value: 0.2
6
+ compare_steps: 1
7
+ early_stopping: false
8
+ exp_name: train_rl
9
+ forward_batch_size: null
10
+ gamma: 1
11
+ global_backward_batch_size: 1
12
+ global_batch_size: 32
13
+ gradient_accumulation_steps: 1
14
+ gradient_checkpointing: false
15
+ horizon: 10000
16
+ init_kl_coef: 0.2
17
+ is_encoder_decoder: false
18
+ is_peft_model: false
19
+ kl_penalty: kl
20
+ lam: 0.95
21
+ learning_rate: 1.41e-05
22
+ log_with: tensorboard
23
+ max_grad_norm: null
24
+ mini_batch_size: 1
25
+ model_name: 01/medical_model/final
26
+ optimize_cuda_cache: null
27
+ optimize_device_cache: false
28
+ ppo_epochs: 4
29
+ project_kwargs/logging_dir: 01/medical_model_rl/final/logs
30
+ query_dataset: imdb
31
+ ratio_threshold: 10.0
32
+ remove_unused_columns: true
33
+ reward_model: sentiment-analysis:lvwerra/distilbert-imdb
34
+ score_clip: null
35
+ seed: 0
36
+ steps: 20000
37
+ target: 6
38
+ target_kl: 0.1
39
+ task_name: null
40
+ total_ppo_epochs: 625
41
+ tracker_project_name: trl
42
+ use_score_norm: false
43
+ use_score_scaling: false
44
+ vf_coef: 0.1
45
+ whiten_rewards: false
46
+ world_size: 1
01/final/logs/trl/1750112449.5937607/events.out.tfevents.1750112449.ip-10-192-12-175.8863.1 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4a018c7e79a0a662b2aba41cfe37dfa0520e18b52c2b342aa600865d5e332d0b
3
+ size 2339
01/final/logs/trl/1750112449.5949225/hparams.yml ADDED
@@ -0,0 +1,46 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ adap_kl_ctrl: true
2
+ backward_batch_size: 1
3
+ batch_size: 32
4
+ cliprange: 0.2
5
+ cliprange_value: 0.2
6
+ compare_steps: 1
7
+ early_stopping: false
8
+ exp_name: train_rl
9
+ forward_batch_size: null
10
+ gamma: 1
11
+ global_backward_batch_size: 1
12
+ global_batch_size: 32
13
+ gradient_accumulation_steps: 1
14
+ gradient_checkpointing: false
15
+ horizon: 10000
16
+ init_kl_coef: 0.2
17
+ is_encoder_decoder: false
18
+ is_peft_model: false
19
+ kl_penalty: kl
20
+ lam: 0.95
21
+ learning_rate: 1.41e-05
22
+ log_with: tensorboard
23
+ max_grad_norm: null
24
+ mini_batch_size: 1
25
+ model_name: 01/medical_model/final
26
+ optimize_cuda_cache: null
27
+ optimize_device_cache: false
28
+ ppo_epochs: 4
29
+ project_kwargs/logging_dir: 01/medical_model_rl/final/logs
30
+ query_dataset: imdb
31
+ ratio_threshold: 10.0
32
+ remove_unused_columns: true
33
+ reward_model: sentiment-analysis:lvwerra/distilbert-imdb
34
+ score_clip: null
35
+ seed: 0
36
+ steps: 20000
37
+ target: 6
38
+ target_kl: 0.1
39
+ task_name: null
40
+ total_ppo_epochs: 625
41
+ tracker_project_name: trl
42
+ use_score_norm: false
43
+ use_score_scaling: false
44
+ vf_coef: 0.1
45
+ whiten_rewards: false
46
+ world_size: 1
01/final/logs/trl/1750112502.7784708/events.out.tfevents.1750112502.ip-10-192-12-175.9534.1 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c0ed1eb598de0c89adfb2076963588e271a13bffa2780c0829f907fb83e9e596
3
+ size 2339
01/final/logs/trl/1750112502.779761/hparams.yml ADDED
@@ -0,0 +1,46 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ adap_kl_ctrl: true
2
+ backward_batch_size: 1
3
+ batch_size: 32
4
+ cliprange: 0.2
5
+ cliprange_value: 0.2
6
+ compare_steps: 1
7
+ early_stopping: false
8
+ exp_name: train_rl
9
+ forward_batch_size: null
10
+ gamma: 1
11
+ global_backward_batch_size: 1
12
+ global_batch_size: 32
13
+ gradient_accumulation_steps: 1
14
+ gradient_checkpointing: false
15
+ horizon: 10000
16
+ init_kl_coef: 0.2
17
+ is_encoder_decoder: false
18
+ is_peft_model: false
19
+ kl_penalty: kl
20
+ lam: 0.95
21
+ learning_rate: 1.41e-05
22
+ log_with: tensorboard
23
+ max_grad_norm: null
24
+ mini_batch_size: 1
25
+ model_name: 01/medical_model/final
26
+ optimize_cuda_cache: null
27
+ optimize_device_cache: false
28
+ ppo_epochs: 4
29
+ project_kwargs/logging_dir: 01/medical_model_rl/final/logs
30
+ query_dataset: imdb
31
+ ratio_threshold: 10.0
32
+ remove_unused_columns: true
33
+ reward_model: sentiment-analysis:lvwerra/distilbert-imdb
34
+ score_clip: null
35
+ seed: 0
36
+ steps: 20000
37
+ target: 6
38
+ target_kl: 0.1
39
+ task_name: null
40
+ total_ppo_epochs: 625
41
+ tracker_project_name: trl
42
+ use_score_norm: false
43
+ use_score_scaling: false
44
+ vf_coef: 0.1
45
+ whiten_rewards: false
46
+ world_size: 1
01/final/logs/trl/events.out.tfevents.1750112156.ip-10-192-12-175.5415.0 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5ad9cb7e5c40dd16a448a907af3701ea0a94308070ca4887051509c4ed207141
3
+ size 88
01/final/logs/trl/events.out.tfevents.1750112185.ip-10-192-12-175.5719.0 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0f250f4d66f6affa875cc06f007ba6ff54616cca0f84e22ce4ef0b79981df1be
3
+ size 88
01/final/logs/trl/events.out.tfevents.1750112240.ip-10-192-12-175.6494.0 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:eb9e2dc71e826553a1147b2bc1d91a5d3cb43a8fe6644b3721c87a7ef7322a03
3
+ size 88
01/final/logs/trl/events.out.tfevents.1750112276.ip-10-192-12-175.6841.0 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2635f8bf182bc351006573c0e3d488874ba78fa9e128d253dcefbadeaf285b1f
3
+ size 88
01/final/logs/trl/events.out.tfevents.1750112333.ip-10-192-12-175.7509.0 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1fd4d33fac511198a12eb01dae7908a577232a1208c855e6c16dde717523e0d9
3
+ size 88
01/final/logs/trl/events.out.tfevents.1750112360.ip-10-192-12-175.7831.0 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9a9103dd8e0542f7c5fc7490b9be13948123ad5a7b191feb1f5027a46511ef08
3
+ size 88
01/final/logs/trl/events.out.tfevents.1750112406.ip-10-192-12-175.8243.0 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:69573e7b3d84660550e8af8be861e918d4e47f5bd252ea1dbd604e4456171037
3
+ size 88
01/final/logs/trl/events.out.tfevents.1750112449.ip-10-192-12-175.8863.0 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8f150d7975757e8502db77022d33da0c5623c17ab763d15ebfd954c15a3eb898
3
+ size 13794
01/final/logs/trl/events.out.tfevents.1750112502.ip-10-192-12-175.9534.0 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5adb2fa1334f4b9a00e0116cf8476e1486d2fe6e92583fffb4fdacd2580957fe
3
+ size 13794
01/final/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
01/final/model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:61a8ae3b07fca46dc8e5793b7e9cadd75565f5f26a29f4d9c7d7331c93865b5b
3
+ size 135157316
01/final/special_tokens_map.json ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "bos_token": {
3
+ "content": "<|endoftext|>",
4
+ "lstrip": false,
5
+ "normalized": true,
6
+ "rstrip": false,
7
+ "single_word": false
8
+ },
9
+ "eos_token": {
10
+ "content": "<|endoftext|>",
11
+ "lstrip": false,
12
+ "normalized": true,
13
+ "rstrip": false,
14
+ "single_word": false
15
+ },
16
+ "pad_token": "<|endoftext|>",
17
+ "unk_token": {
18
+ "content": "<|endoftext|>",
19
+ "lstrip": false,
20
+ "normalized": true,
21
+ "rstrip": false,
22
+ "single_word": false
23
+ }
24
+ }
01/final/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
01/final/tokenizer_config.json ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "add_bos_token": false,
3
+ "add_prefix_space": false,
4
+ "added_tokens_decoder": {
5
+ "28917": {
6
+ "content": "<|endoftext|>",
7
+ "lstrip": false,
8
+ "normalized": true,
9
+ "rstrip": false,
10
+ "single_word": false,
11
+ "special": true
12
+ }
13
+ },
14
+ "bos_token": "<|endoftext|>",
15
+ "clean_up_tokenization_spaces": true,
16
+ "eos_token": "<|endoftext|>",
17
+ "errors": "replace",
18
+ "model_max_length": 128,
19
+ "pad_token": "<|endoftext|>",
20
+ "padding_side": "left",
21
+ "tokenizer_class": "GPT2Tokenizer",
22
+ "unk_token": "<|endoftext|>"
23
+ }
01/final/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
README.md CHANGED
@@ -1,12 +1,6 @@
1
  ---
2
- title: Med Demo App
3
- emoji:
4
- colorFrom: gray
5
- colorTo: gray
6
  sdk: gradio
7
  sdk_version: 5.34.0
8
- app_file: app.py
9
- pinned: false
10
  ---
11
-
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
1
  ---
2
+ title: med_demo_app
3
+ app_file: chatbot_ui.py
 
 
4
  sdk: gradio
5
  sdk_version: 5.34.0
 
 
6
  ---
 
 
chatbot_ui.py ADDED
@@ -0,0 +1,83 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import gradio as gr
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
from termcolor import colored

# --- Model and Tokenizer Loading ---
# Both paths point at the RL-fine-tuned checkpoint directory.
MODEL_PATH = "01/medical_model_rl/final"
TOKENIZER_PATH = "01/medical_model_rl/final"

print("Loading model and tokenizer...")

try:
    # padding_side='left' matches the shipped tokenizer_config.json; decoder-only
    # models pad on the left so generation continues from real prompt tokens.
    tokenizer = AutoTokenizer.from_pretrained(TOKENIZER_PATH, padding_side='left')
    model = AutoModelForCausalLM.from_pretrained(MODEL_PATH)

    # Sync embedding rows with the tokenizer, which carries an added token
    # (added_tokens.json maps <|endoftext|> to id 28917).
    model.resize_token_embeddings(len(tokenizer))

    # GPT-2 style tokenizers ship without a pad token; fall back to EOS.
    if tokenizer.pad_token is None:
        tokenizer.pad_token = tokenizer.eos_token

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model.to(device)
    model.eval()  # inference only: disables dropout

    print(colored("Model loaded successfully.", "green"))

except Exception as e:
    # Keep the module importable even if the checkpoint is absent/corrupt;
    # the chat handler checks for None and surfaces the failure to the user.
    print(colored(f"Error loading model: {e}", "red"))
    model = None
    tokenizer = None
# --- Chatbot Inference Function ---
def medical_chatbot(message, history):
    """Generate an answer to *message* with the fine-tuned medical model.

    Args:
        message: The user's question (plain text).
        history: Conversation history supplied by gr.ChatInterface (unused;
            each question is answered independently).

    Returns:
        The generated answer as a string, or a human-readable error message
        when the model is unavailable or inference fails.
    """
    if not model or not tokenizer:
        return "Error: Model is not loaded. Please check the console for errors."

    try:
        # Format the prompt the way the model was fine-tuned.
        full_prompt = f"Question: {message}\n\nAnswer:"

        # Tokenize; truncation caps the prompt at the tokenizer's
        # model_max_length (128 per tokenizer_config.json).
        inputs = tokenizer(full_prompt, return_tensors="pt", truncation=True).to(device)

        # BUGFIX: the previous max_length=128 equalled the model's whole
        # context window (n_positions=128), so a long question could consume
        # the entire budget and generate() would emit no new tokens. Budget
        # new tokens explicitly from the remaining context instead.
        context_window = getattr(model.config, "n_positions", 128)
        prompt_length = inputs["input_ids"].shape[1]
        if prompt_length >= context_window:
            return "Sorry, your question is too long for this model. Please shorten it."
        new_token_budget = context_window - prompt_length

        # Sampled generation; pad_token_id silences the missing-pad warning.
        with torch.no_grad():
            output_sequences = model.generate(
                input_ids=inputs["input_ids"],
                attention_mask=inputs["attention_mask"],
                max_new_tokens=new_token_budget,
                do_sample=True,
                top_k=50,
                top_p=0.95,
                num_return_sequences=1,
                pad_token_id=tokenizer.eos_token_id,
            )

        # Decode the full sequence, then keep only the text after the last
        # "Answer:" marker (the model may echo the prompt).
        response_text = tokenizer.decode(output_sequences[0], skip_special_tokens=True)
        answer = response_text.split("Answer:")[-1].strip()

        return answer

    except Exception as e:
        # Log server-side, return a friendly message to the UI.
        print(colored(f"An error occurred during inference: {e}", "red"))
        return "Sorry, I encountered an error. Please try again."
# --- Gradio UI ---
# Example questions shown beneath the chat box.
_example_questions = [
    ["What are the symptoms of diabetes?"],
    ["How does metformin work?"],
    ["What is the difference between a virus and a bacteria?"],
]

_chat_ui = gr.ChatInterface(
    fn=medical_chatbot,
    title="Medical Chatbot",
    description="Ask any medical question, and the AI will try to answer.",
    examples=_example_questions,
    theme="soft",
)

# Serve immediately at import time with a public share link; as in the
# original, the launch() return value is bound to chatbot_interface.
chatbot_interface = _chat_ui.launch(share=True)
requirements.txt ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ datasets
2
+ tokenizers
3
+ transformers
4
+ torch
5
+ termcolor
6
+ beautifulsoup4
7
+ detoxify
8
+ rouge-score
9
+ tqdm
10
+ huggingface_hub
11
+ trl
12
+ ipywidgets
13
+ tensorboard
14
+ bitsandbytes