EbanLee committed
Commit d6deb9c · 1 Parent(s): ed65133

model upload

README.md CHANGED
@@ -33,7 +33,7 @@ length_penalty=1.5,
  max_length=256,
  min_length=12,
  num_beams=6,
- repetition_penalty=2.0,
+ repetition_penalty=1.5,
  )
 
  # Decoding Text Ids
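For reference, a minimal sketch of the README's usage pattern with the updated generation arguments. The model id below is assumed from the config's "_name_or_path" and may not match this repository exactly; KoBART checkpoints load as a standard BART model.

# Hedged sketch: model id assumed from "_name_or_path"; adjust to this repo's actual id.
from transformers import AutoTokenizer, BartForConditionalGeneration

model_id = "EbanLee/kobart-summary-v2"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = BartForConditionalGeneration.from_pretrained(model_id)

text = "..."  # Korean source text to summarize (placeholder)
inputs = tokenizer([text], return_tensors="pt", truncation=True, max_length=1024)

# Same arguments as the README snippet, with the lowered repetition_penalty.
summary_ids = model.generate(
    inputs["input_ids"],
    length_penalty=1.5,
    max_length=256,
    min_length=12,
    num_beams=6,
    repetition_penalty=1.5,
)

# Decoding Text Ids
print(tokenizer.decode(summary_ids[0], skip_special_tokens=True))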
config.json CHANGED
@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "EbanLee/kobart-summary-v2",
+  "_name_or_path": "kobart-summary-v2",
   "activation_dropout": 0.0,
   "activation_function": "gelu",
   "add_bias_logits": false,
@@ -9,7 +9,7 @@
   ],
   "attention_dropout": 0.0,
   "author": "EbanLee([email protected])",
-  "bos_token_id": 0,
+  "bos_token_id": 1,
   "classif_dropout": 0.1,
   "classifier_dropout": 0.1,
   "d_model": 768,
@@ -17,7 +17,7 @@
   "decoder_ffn_dim": 3072,
   "decoder_layerdrop": 0.0,
   "decoder_layers": 6,
-  "decoder_start_token_id": 2,
+  "decoder_start_token_id": 1,
   "do_blenderbot_90_layernorm": false,
   "dropout": 0.1,
   "encoder_attention_heads": 16,
@@ -27,7 +27,7 @@
   "eos_token_id": 1,
   "extra_pos_embeddings": 2,
   "force_bos_token_to_be_generated": false,
-  "forced_eos_token_id": 2,
+  "forced_eos_token_id": 1,
   "gradient_checkpointing": false,
   "id2label": {
     "0": "NEGATIVE",
@@ -48,17 +48,18 @@
   "pad_token_id": 3,
   "scale_embedding": false,
   "static_position_embeddings": false,
+  "tokenizer_class": "PreTrainedTokenizerFast",
+  "torch_dtype": "float32",
+  "transformers_version": "4.38.2",
+  "use_cache": true,
+  "vocab_size": 30000,
   "task_specific_params": {
     "summarization": {
       "length_penalty": 1.5,
       "max_length": 256,
       "min_length": 12,
       "num_beams": 6,
-      "repetition_penalty": 2.0
+      "repetition_penalty": 1.5
     }
-  },
-  "torch_dtype": "float32",
-  "transformers_version": "4.25.1",
-  "use_cache": true,
-  "vocab_size": 30000
+  }
 }
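A quick way to sanity-check the retagged token ids after this commit: every decoder control token now points at id 1, the same token as eos_token_id (sketch only; model id assumed as above).

# Sketch: confirm the special-token ids written by this commit.
from transformers import AutoConfig

config = AutoConfig.from_pretrained("EbanLee/kobart-summary-v2")
for name in ("bos_token_id", "decoder_start_token_id",
             "eos_token_id", "forced_eos_token_id", "pad_token_id"):
    print(name, "=", getattr(config, name))  # expected: 1, 1, 1, 1, 3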
generation_config.json ADDED
@@ -0,0 +1,9 @@
+{
+  "_from_model_config": true,
+  "bos_token_id": 1,
+  "decoder_start_token_id": 1,
+  "eos_token_id": 1,
+  "forced_eos_token_id": 1,
+  "pad_token_id": 3,
+  "transformers_version": "4.38.2"
+}
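Recent transformers releases resolve generation defaults from generation_config.json when it exists, rather than from the legacy top-level fields in config.json, which is why this new file mirrors the token ids above. A sketch of loading it directly (model id assumed):

# Sketch: inspect the new standalone generation config.
from transformers import GenerationConfig

gen_config = GenerationConfig.from_pretrained("EbanLee/kobart-summary-v2")
print(gen_config.decoder_start_token_id)  # 1
print(gen_config.pad_token_id)            # 3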
pytorch_model.bin → model.safetensors RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:686a6eda47b279b39c8c75c5a27bee1a92f1edb56f4e2c05159f8d81780baef7
-size 495648413
+oid sha256:3eb2372526484e40732a6f421a8752ec2887ccc2eec8011c64ca8c6fb9dd4426
+size 495589768
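The weight rename needs no code changes: from_pretrained prefers model.safetensors over pytorch_model.bin when both are available. To require the safetensors file explicitly (sketch; model id assumed as above):

# Sketch: use_safetensors=True raises if no .safetensors checkpoint is found.
from transformers import BartForConditionalGeneration

model = BartForConditionalGeneration.from_pretrained(
    "EbanLee/kobart-summary-v2", use_safetensors=True
)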