Yanjie Zhao committed on
Commit
1ce1760
·
1 Parent(s): 8533a0d

model updated after cleaned data

Browse files
Files changed (3) hide show
  1. config.json +2 -2
  2. pytorch_model.bin +2 -2
  3. tokenizer.json +10 -10
config.json CHANGED
@@ -8,7 +8,7 @@
8
  "dim": 768,
9
  "dropout": 0.1,
10
  "hidden_dim": 3072,
11
- "id2label":{
12
  "0": "blank",
13
  "1": "great",
14
  "2": "welcome",
@@ -54,6 +54,6 @@
54
  "sinusoidal_pos_embds": false,
55
  "tie_weights_": true,
56
  "torch_dtype": "float32",
57
- "transformers_version": "4.16.2",
58
  "vocab_size": 30522
59
  }
 
8
  "dim": 768,
9
  "dropout": 0.1,
10
  "hidden_dim": 3072,
11
+ "id2label": {
12
  "0": "blank",
13
  "1": "great",
14
  "2": "welcome",
 
54
  "sinusoidal_pos_embds": false,
55
  "tie_weights_": true,
56
  "torch_dtype": "float32",
57
+ "transformers_version": "4.17.0",
58
  "vocab_size": 30522
59
  }
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:17fccf0713b4dcd99e5be27f523ee597e02ca95134220d567ac3ca394cd6246e
3
- size 267900017
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c9e655f81fb22f464ea28fe5b1ccb4d3c1f06d1a9ff63e01b328e56e78cad87e
3
+ size 267900465
tokenizer.json CHANGED
@@ -19,48 +19,48 @@
19
  "added_tokens": [
20
  {
21
  "id": 0,
22
- "special": true,
23
  "content": "[PAD]",
24
  "single_word": false,
25
  "lstrip": false,
26
  "rstrip": false,
27
- "normalized": false
 
28
  },
29
  {
30
  "id": 100,
31
- "special": true,
32
  "content": "[UNK]",
33
  "single_word": false,
34
  "lstrip": false,
35
  "rstrip": false,
36
- "normalized": false
 
37
  },
38
  {
39
  "id": 101,
40
- "special": true,
41
  "content": "[CLS]",
42
  "single_word": false,
43
  "lstrip": false,
44
  "rstrip": false,
45
- "normalized": false
 
46
  },
47
  {
48
  "id": 102,
49
- "special": true,
50
  "content": "[SEP]",
51
  "single_word": false,
52
  "lstrip": false,
53
  "rstrip": false,
54
- "normalized": false
 
55
  },
56
  {
57
  "id": 103,
58
- "special": true,
59
  "content": "[MASK]",
60
  "single_word": false,
61
  "lstrip": false,
62
  "rstrip": false,
63
- "normalized": false
 
64
  }
65
  ],
66
  "normalizer": {
 
19
  "added_tokens": [
20
  {
21
  "id": 0,
 
22
  "content": "[PAD]",
23
  "single_word": false,
24
  "lstrip": false,
25
  "rstrip": false,
26
+ "normalized": false,
27
+ "special": true
28
  },
29
  {
30
  "id": 100,
 
31
  "content": "[UNK]",
32
  "single_word": false,
33
  "lstrip": false,
34
  "rstrip": false,
35
+ "normalized": false,
36
+ "special": true
37
  },
38
  {
39
  "id": 101,
 
40
  "content": "[CLS]",
41
  "single_word": false,
42
  "lstrip": false,
43
  "rstrip": false,
44
+ "normalized": false,
45
+ "special": true
46
  },
47
  {
48
  "id": 102,
 
49
  "content": "[SEP]",
50
  "single_word": false,
51
  "lstrip": false,
52
  "rstrip": false,
53
+ "normalized": false,
54
+ "special": true
55
  },
56
  {
57
  "id": 103,
 
58
  "content": "[MASK]",
59
  "single_word": false,
60
  "lstrip": false,
61
  "rstrip": false,
62
+ "normalized": false,
63
+ "special": true
64
  }
65
  ],
66
  "normalizer": {