sgugger committed
Commit 77ebbbf · 1 Parent(s): cd0d132

Upload tiny models for YosoForSequenceClassification

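The files below define a tiny, random-weight checkpoint meant for fast tests rather than real inference. As a minimal usage sketch (assuming the repo id hf-internal-testing/tiny-random-YosoForSequenceClassification, which is not stated in this commit):

from transformers import AutoModelForSequenceClassification, AutoTokenizer

repo_id = "hf-internal-testing/tiny-random-YosoForSequenceClassification"  # assumed repo id
tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForSequenceClassification.from_pretrained(repo_id)

# A single forward pass is all a smoke test needs
inputs = tokenizer("a quick smoke test", return_tensors="pt")
print(model(**inputs).logits.shape)  # (1, num_labels)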
config.json CHANGED
@@ -26,5 +26,5 @@
   "type_vocab_size": 16,
   "use_expectation": true,
   "use_fast_hash": true,
-  "vocab_size": 50265
+  "vocab_size": 1024
 }
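The vocab_size cut from 50265 to 1024 is what shrinks the checkpoint (see the pytorch_model.bin diff below), since the word-embedding matrix scales linearly with the vocabulary. A quick way to confirm the resulting parameter count (a sketch; repo id assumed as above):

from transformers import YosoConfig, YosoForSequenceClassification

config = YosoConfig.from_pretrained("hf-internal-testing/tiny-random-YosoForSequenceClassification")  # assumed repo id
model = YosoForSequenceClassification(config)  # random weights, same shapes as the uploaded checkpoint
print(sum(p.numel() for p in model.parameters()))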
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:be8214da7dc6ef9217d0e667df02a13d3fbc4b8803baf9a24756a5ef455b4e0a
-size 6675071
+oid sha256:b18a6a1d4b7609c43ac8c20a56325ea55183775524d3fb12f0c9ecd0dbf52dd2
+size 372223
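Only the Git LFS pointer changes here, not an inline binary diff: oid is the SHA-256 of the weight file and size its byte count, so the checkpoint drops from about 6.7 MB to about 372 KB. To verify a downloaded file against the pointer (a sketch):

import hashlib

with open("pytorch_model.bin", "rb") as f:
    digest = hashlib.sha256(f.read()).hexdigest()
print(digest)  # should match the oid above for the new file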
special_tokens_map.json CHANGED
@@ -1,15 +1,15 @@
 {
-  "bos_token": "[CLS]",
-  "cls_token": "[CLS]",
-  "eos_token": "[SEP]",
+  "bos_token": "<s>",
+  "cls_token": "<s>",
+  "eos_token": "</s>",
   "mask_token": {
-    "content": "[MASK]",
+    "content": "<mask>",
     "lstrip": true,
     "normalized": false,
     "rstrip": false,
     "single_word": false
   },
   "pad_token": "<pad>",
-  "sep_token": "[SEP]",
+  "sep_token": "</s>",
   "unk_token": "<unk>"
 }
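The special tokens move from ALBERT-style bracketed tokens to RoBERTa-style <s>/</s>/<mask>, matching the new vocabulary. A quick check that the saved files resolve as intended (a sketch; repo id assumed as above):

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-YosoForSequenceClassification")  # assumed repo id
print(tok.bos_token, tok.eos_token, tok.sep_token, tok.mask_token)  # expected: <s> </s> </s> <mask>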
tokenizer.json CHANGED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json CHANGED
@@ -1,12 +1,14 @@
 {
-  "bos_token": "[CLS]",
-  "cls_token": "[CLS]",
+  "add_prefix_space": false,
+  "bos_token": "<s>",
+  "cls_token": "<s>",
   "do_lower_case": true,
-  "eos_token": "[SEP]",
+  "eos_token": "</s>",
+  "errors": "replace",
   "keep_accents": false,
   "mask_token": {
     "__type": "AddedToken",
-    "content": "[MASK]",
+    "content": "<mask>",
     "lstrip": true,
     "normalized": false,
     "rstrip": false,
@@ -15,9 +17,9 @@
   "model_max_length": 512,
   "pad_token": "<pad>",
   "remove_space": true,
-  "sep_token": "[SEP]",
-  "sp_model_kwargs": {},
+  "sep_token": "</s>",
   "special_tokens_map_file": null,
   "tokenizer_class": "AlbertTokenizer",
+  "trim_offsets": true,
   "unk_token": "<unk>"
 }