Xenova (HF Staff) committed
Commit 3f3f2ee · verified · 1 parent: d463d7b

Update tokenizer files (and add tokenizer.json) (#3)


- Update tokenizer files (and add tokenizer.json) (84ece70140c20bbac8f6f5f9b818a54ce6ffa1ac)

Files changed (3)
  1. tokenizer.json +0 -0
  2. tokenizer_config.json +22 -33
  3. vocab.json +0 -0
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json CHANGED
@@ -1,41 +1,30 @@
 {
   "add_bos_token": true,
   "add_prefix_space": false,
-  "bos_token": {
-    "__type": "AddedToken",
-    "content": "</s>",
-    "lstrip": false,
-    "normalized": true,
-    "rstrip": false,
-    "single_word": false
-  },
-  "eos_token": {
-    "__type": "AddedToken",
-    "content": "</s>",
-    "lstrip": false,
-    "normalized": true,
-    "rstrip": false,
-    "single_word": false
+  "added_tokens_decoder": {
+    "1": {
+      "content": "<pad>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "2": {
+      "content": "</s>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
   },
+  "bos_token": "</s>",
+  "clean_up_tokenization_spaces": true,
+  "eos_token": "</s>",
   "errors": "replace",
   "model_max_length": 100,
-  "name_or_path": "facebook/opt-350m",
-  "pad_token": {
-    "__type": "AddedToken",
-    "content": "<pad>",
-    "lstrip": false,
-    "normalized": true,
-    "rstrip": false,
-    "single_word": false
-  },
-  "special_tokens_map_file": null,
+  "pad_token": "<pad>",
   "tokenizer_class": "GPT2Tokenizer",
-  "unk_token": {
-    "__type": "AddedToken",
-    "content": "</s>",
-    "lstrip": false,
-    "normalized": true,
-    "rstrip": false,
-    "single_word": false
-  }
+  "unk_token": "</s>"
 }
vocab.json CHANGED
The diff for this file is too large to render. See raw diff
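
In the updated tokenizer_config.json, the legacy per-field AddedToken objects are replaced by an added_tokens_decoder map keyed by token id, the special tokens become plain strings, and the newly added tokenizer.json carries the full fast-tokenizer serialization. Below is a minimal sketch of how the updated files are typically consumed; the repository id is a placeholder (the actual repo name is not shown on this page), and it assumes the usual OPT-style convention of "</s>" as BOS/EOS/UNK (id 2) and "<pad>" (id 1), matching the config above.

from transformers import AutoTokenizer

# Placeholder repo id; substitute the repository this commit belongs to.
tokenizer = AutoTokenizer.from_pretrained("user/model-with-this-commit")

# With tokenizer.json present, AutoTokenizer can load the fast (Rust-backed)
# tokenizer directly from that file instead of rebuilding it from vocab.json.
print(type(tokenizer).__name__)  # e.g. GPT2TokenizerFast

# Special tokens now come from the simplified string fields in tokenizer_config.json.
print(tokenizer.bos_token, tokenizer.eos_token, tokenizer.unk_token)  # all "</s>"
print(tokenizer.pad_token, tokenizer.pad_token_id)  # "<pad>", 1 per added_tokens_decoder

# add_bos_token is true, so encoded sequences should start with the "</s>" id (2).
print(tokenizer("Hello world!").input_ids)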