hf-transformers-bot committed
Commit dbca2a0 · 1 Parent(s): ebadb62

Update tiny models for EfficientFormerForImageClassification

Files changed (4):
  1. config.json +16 -14
  2. preprocessor_config.json +4 -4
  3. pytorch_model.bin +2 -2
  4. tf_model.h5 +3 -0
config.json CHANGED
@@ -1,16 +1,18 @@
 {
+  "_name_or_path": "tiny_models/efficientformer/EfficientFormerForImageClassification",
   "architectures": [
     "EfficientFormerForImageClassification"
   ],
   "attention_probs_dropout_prob": 0.1,
   "attention_ratio": 4,
+  "batch_norm_eps": 1e-05,
   "depths": [
-    3,
     2,
-    6,
-    4
+    2,
+    2,
+    2
   ],
-  "dim": 448,
+  "dim": 128,
   "distillation": true,
   "downsample_pad": 1,
   "downsample_patch_size": 3,
@@ -25,29 +27,29 @@
   "encoder_stride": 2,
   "hidden_act": "gelu",
   "hidden_dropout_prob": 0.1,
-  "hidden_size": 448,
+  "hidden_size": 128,
   "hidden_sizes": [
-    48,
-    96,
-    224,
-    448
+    16,
+    32,
+    64,
+    128
   ],
-  "image_size": 224,
+  "image_size": 64,
   "initializer_range": 0.02,
   "intermediate_size": 37,
   "key_dim": 32,
   "layer_norm_eps": 1e-12,
   "layer_scale_init_value": 1e-05,
-  "mlp_expansion_ratio": 4,
+  "mlp_expansion_ratio": 2,
   "model_type": "efficientformer",
-  "num_attention_heads": 8,
+  "num_attention_heads": 4,
   "num_channels": 3,
   "num_hidden_layers": 7,
   "num_meta3d_blocks": 1,
   "patch_size": 2,
   "pool_size": 3,
-  "resolution": 7,
+  "resolution": 2,
   "torch_dtype": "float32",
-  "transformers_version": "4.28.0.dev0",
+  "transformers_version": "4.31.0.dev0",
   "use_layer_scale": true
 }
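
For quick inspection, a minimal sketch (assuming a local copy of the updated config.json and a transformers build with EfficientFormer support; the file path is an assumption) that loads the new config and rebuilds the tiny classifier from it:

from transformers import EfficientFormerConfig, EfficientFormerForImageClassification

# Path is an assumption: point it at the updated config.json from this commit.
config = EfficientFormerConfig.from_json_file("config.json")

# The tiny variant shrinks every stage: four stages of depth 2 with channel
# widths 16/32/64/128 instead of the original 48/96/224/448, at 64x64 input.
print(config.depths)        # [2, 2, 2, 2]
print(config.hidden_sizes)  # [16, 32, 64, 128]
print(config.image_size)    # 64

# A model built from this config is randomly initialised and small, which is
# the point of these test checkpoints.
model = EfficientFormerForImageClassification(config)
print(sum(p.numel() for p in model.parameters()))
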
preprocessor_config.json CHANGED
@@ -1,7 +1,7 @@
 {
   "crop_size": {
-    "height": 224,
-    "width": 224
+    "height": 64,
+    "width": 64
   },
   "do_center_crop": true,
   "do_normalize": true,
@@ -21,7 +21,7 @@
   "resample": 3,
   "rescale_factor": 0.00392156862745098,
   "size": {
-    "height": 224,
-    "width": 224
+    "height": 64,
+    "width": 64
   }
 }
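
A short sketch of the matching preprocessing step, assuming a local copy of the updated preprocessor_config.json (the path and dummy image are assumptions):

import numpy as np
from transformers import EfficientFormerImageProcessor

# Path is an assumption: point it at the updated preprocessor_config.json.
processor = EfficientFormerImageProcessor.from_json_file("preprocessor_config.json")

# Any input resolution works; the processor now resizes and center-crops to 64x64.
image = np.random.randint(0, 256, (180, 200, 3), dtype=np.uint8)
inputs = processor(images=image, return_tensors="pt")
print(inputs["pixel_values"].shape)  # torch.Size([1, 3, 64, 64])
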
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:52ee27f06678a90465d0e1beb6f1da50a99346b0130769ab92c0b545b97f3910
-size 45819689
+oid sha256:2124748eb8901447d7737dc00b45ee69d3300db5dd20f786437b1a1c25253926
+size 1843305
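
The weight files are Git LFS pointers: the repository stores only a sha256 oid and a byte size, while the binary itself lives in LFS storage. A small sketch, assuming the blob has already been downloaded next to the script (the local filename is an assumption), for checking it against the pointer:

import hashlib
from pathlib import Path

def matches_lfs_pointer(path: str, expected_oid: str, expected_size: int) -> bool:
    # Compare a downloaded blob against the sha256 oid and byte size in its pointer.
    data = Path(path).read_bytes()
    return len(data) == expected_size and hashlib.sha256(data).hexdigest() == expected_oid

# Values copied from the new pointer above; the local filename is an assumption.
print(matches_lfs_pointer(
    "pytorch_model.bin",
    "2124748eb8901447d7737dc00b45ee69d3300db5dd20f786437b1a1c25253926",
    1843305,
))
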
tf_model.h5 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6e1d6737290525d6b0af57318f03256217680b330a6848261178bbb534ff495d
+size 1951984
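
With tf_model.h5 now present alongside pytorch_model.bin, the checkpoint can be loaded natively from both frameworks. A hedged sketch, assuming the four files sit in a local directory named tiny-efficientformer (the directory name is hypothetical) and that both torch and TensorFlow are installed:

from transformers import (
    EfficientFormerForImageClassification,
    TFEfficientFormerForImageClassification,
)

checkpoint_dir = "tiny-efficientformer"  # hypothetical local directory with all four files

# PyTorch weights load from pytorch_model.bin ...
pt_model = EfficientFormerForImageClassification.from_pretrained(checkpoint_dir)

# ... and the newly added tf_model.h5 lets the TF class load directly,
# with no on-the-fly conversion from the PyTorch checkpoint (from_pt=True).
tf_model = TFEfficientFormerForImageClassification.from_pretrained(checkpoint_dir)
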