Commit b630245
Parent(s): ebadb62

Update tiny models for EfficientFormerForImageClassification

Files changed:
- config.json +16 -14
- preprocessor_config.json +4 -4
- pytorch_model.bin +2 -2
- tf_model.h5 +3 -0
config.json
CHANGED
@@ -1,16 +1,18 @@
 {
+  "_name_or_path": "tiny_models/efficientformer/EfficientFormerForImageClassification",
   "architectures": [
     "EfficientFormerForImageClassification"
   ],
   "attention_probs_dropout_prob": 0.1,
   "attention_ratio": 4,
+  "batch_norm_eps": 1e-05,
   "depths": [
-    3,
     2,
-
-
+    2,
+    2,
+    2
   ],
-  "dim":
+  "dim": 128,
   "distillation": true,
   "downsample_pad": 1,
   "downsample_patch_size": 3,
@@ -25,29 +27,29 @@
   "encoder_stride": 2,
   "hidden_act": "gelu",
   "hidden_dropout_prob": 0.1,
-  "hidden_size":
+  "hidden_size": 128,
   "hidden_sizes": [
-
-
-
-
+    16,
+    32,
+    64,
+    128
   ],
-  "image_size":
+  "image_size": 64,
   "initializer_range": 0.02,
   "intermediate_size": 37,
   "key_dim": 32,
   "layer_norm_eps": 1e-12,
   "layer_scale_init_value": 1e-05,
-  "mlp_expansion_ratio":
+  "mlp_expansion_ratio": 2,
   "model_type": "efficientformer",
-  "num_attention_heads":
+  "num_attention_heads": 4,
   "num_channels": 3,
   "num_hidden_layers": 7,
   "num_meta3d_blocks": 1,
   "patch_size": 2,
   "pool_size": 3,
-  "resolution":
+  "resolution": 2,
   "torch_dtype": "float32",
-  "transformers_version": "4.
+  "transformers_version": "4.30.0.dev0",
   "use_layer_scale": true
 }
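For context, the updated values describe a much smaller model than the released EfficientFormer checkpoints. A minimal sketch of rebuilding it locally from these values, using the public EfficientFormerConfig and EfficientFormerForImageClassification classes (the keyword names mirror the JSON keys above; nothing here downloads the checkpoint):

from transformers import EfficientFormerConfig, EfficientFormerForImageClassification
import torch

# All values copied from the updated config.json above.
config = EfficientFormerConfig(
    depths=[2, 2, 2, 2],
    hidden_sizes=[16, 32, 64, 128],
    dim=128,
    hidden_size=128,
    image_size=64,
    key_dim=32,
    attention_ratio=4,
    mlp_expansion_ratio=2,
    num_attention_heads=4,
    num_hidden_layers=7,
    num_meta3d_blocks=1,
    patch_size=2,
    pool_size=3,
    resolution=2,
    batch_norm_eps=1e-05,
    distillation=True,
    use_layer_scale=True,
)
model = EfficientFormerForImageClassification(config)

# A random 3x64x64 batch matches the new "image_size": 64.
logits = model(torch.rand(1, 3, 64, 64)).logits
print(logits.shape)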
preprocessor_config.json
CHANGED
@@ -1,7 +1,7 @@
 {
   "crop_size": {
-    "height":
-    "width":
+    "height": 64,
+    "width": 64
   },
   "do_center_crop": true,
   "do_normalize": true,
@@ -21,7 +21,7 @@
   "resample": 3,
   "rescale_factor": 0.00392156862745098,
   "size": {
-    "height":
-    "width":
+    "height": 64,
+    "width": 64
   }
 }
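The matching preprocessing, sketched with the public EfficientFormerImageProcessor class (the arguments mirror the updated preprocessor_config.json; resample=3 is PIL bicubic and rescale_factor 0.00392156862745098 is 1/255):

from transformers import EfficientFormerImageProcessor
from PIL import Image
import numpy as np

processor = EfficientFormerImageProcessor(
    size={"height": 64, "width": 64},
    crop_size={"height": 64, "width": 64},
    do_center_crop=True,
    do_normalize=True,
    resample=3,
    rescale_factor=1 / 255,
)

# Any RGB image works; a random 96x96 one stands in here.
image = Image.fromarray((np.random.rand(96, 96, 3) * 255).astype(np.uint8))
inputs = processor(images=image, return_tensors="pt")
print(inputs["pixel_values"].shape)  # torch.Size([1, 3, 64, 64])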
pytorch_model.bin
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:610ca08329fed950045fa8922b9acf0161862c5fc6242691426df3cf371d5903
+size 1843305
tf_model.h5
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dd17ab6c4d7d8077106aab047d7ccbfa4736f03ee16554a3caa122d90d8f03f2
+size 1951984
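Both weight files are stored as Git LFS pointers: the repository tracks only the spec version line, the sha256 oid, and the byte size, while the actual blob lives in LFS storage. A hedged sketch of checking a downloaded copy against the pytorch_model.bin pointer above (the local filename is an assumption):

import hashlib

# sha256 oid and size recorded in the pytorch_model.bin pointer above.
EXPECTED_OID = "610ca08329fed950045fa8922b9acf0161862c5fc6242691426df3cf371d5903"
EXPECTED_SIZE = 1843305  # bytes

with open("pytorch_model.bin", "rb") as f:  # assumed local path
    data = f.read()

assert len(data) == EXPECTED_SIZE, f"size mismatch: {len(data)}"
assert hashlib.sha256(data).hexdigest() == EXPECTED_OID, "hash mismatch"
print("pytorch_model.bin matches its LFS pointer")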