microwaveablemax committed on
Commit
3a751f6
·
verified ·
1 Parent(s): a3bb24d

End of training

Browse files
README.md ADDED
@@ -0,0 +1,103 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ library_name: transformers
3
+ license: apache-2.0
4
+ base_model: dennisjooo/Birds-Classifier-EfficientNetB2
5
+ tags:
6
+ - generated_from_trainer
7
+ datasets:
8
+ - imagefolder
9
+ metrics:
10
+ - f1
11
+ - precision
12
+ - recall
13
+ - accuracy
14
+ model-index:
15
+ - name: train_checkpoints2
16
+ results:
17
+ - task:
18
+ name: Image Classification
19
+ type: image-classification
20
+ dataset:
21
+ name: imagefolder
22
+ type: imagefolder
23
+ config: default
24
+ split: validation
25
+ args: default
26
+ metrics:
27
+ - name: F1
28
+ type: f1
29
+ value: 0.8685894687564659
30
+ - name: Precision
31
+ type: precision
32
+ value: 0.8781544844044844
33
+ - name: Recall
34
+ type: recall
35
+ value: 0.8634882138558609
36
+ - name: Accuracy
37
+ type: accuracy
38
+ value: 0.8686131386861314
39
+ ---
40
+
41
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
42
+ should probably proofread and complete it, then remove this comment. -->
43
+
44
+ # train_checkpoints2
45
+
46
+ This model is a fine-tuned version of [dennisjooo/Birds-Classifier-EfficientNetB2](https://huggingface.co/dennisjooo/Birds-Classifier-EfficientNetB2) on the imagefolder dataset.
47
+ It achieves the following results on the evaluation set:
48
+ - Loss: 0.4826
49
+ - F1: 0.8686
50
+ - Precision: 0.8782
51
+ - Recall: 0.8635
52
+ - Accuracy: 0.8686
53
+
54
+ ## Model description
55
+
56
+ More information needed
57
+
58
+ ## Intended uses & limitations
59
+
60
+ More information needed
61
+
62
+ ## Training and evaluation data
63
+
64
+ More information needed
65
+
66
+ ## Training procedure
67
+
68
+ ### Training hyperparameters
69
+
70
+ The following hyperparameters were used during training:
71
+ - learning_rate: 5e-05
72
+ - train_batch_size: 16
73
+ - eval_batch_size: 16
74
+ - seed: 42
75
+ - gradient_accumulation_steps: 4
76
+ - total_train_batch_size: 64
77
+ - optimizer: AdamW (torch implementation) with betas=(0.9, 0.999) and epsilon=1e-08; no additional optimizer arguments
78
+ - lr_scheduler_type: linear
79
+ - lr_scheduler_warmup_ratio: 0.1
80
+ - num_epochs: 10
81
+
82
+ ### Training results
83
+
84
+ | Training Loss | Epoch | Step | Validation Loss | F1 | Precision | Recall | Accuracy |
85
+ |:-------------:|:-----:|:----:|:---------------:|:------:|:---------:|:------:|:--------:|
86
+ | 0.1145 | 1.0 | 15 | 0.5836 | 0.8608 | 0.8776 | 0.8520 | 0.8613 |
87
+ | 0.129 | 2.0 | 30 | 0.8019 | 0.8322 | 0.8634 | 0.8192 | 0.8358 |
88
+ | 0.2085 | 3.0 | 45 | 0.7550 | 0.8083 | 0.8355 | 0.8042 | 0.8212 |
89
+ | 0.1722 | 4.0 | 60 | 0.7524 | 0.8298 | 0.8422 | 0.8357 | 0.8394 |
90
+ | 0.19 | 5.0 | 75 | 0.5542 | 0.8743 | 0.8910 | 0.8679 | 0.8723 |
91
+ | 0.1612 | 6.0 | 90 | 0.8325 | 0.8114 | 0.8410 | 0.8063 | 0.8066 |
92
+ | 0.2009 | 7.0 | 105 | 0.4425 | 0.8900 | 0.8904 | 0.8911 | 0.8942 |
93
+ | 0.209 | 8.0 | 120 | 0.6705 | 0.8126 | 0.8482 | 0.8074 | 0.8358 |
94
+ | 0.2188 | 9.0 | 135 | 0.5906 | 0.8387 | 0.8551 | 0.8350 | 0.8467 |
95
+ | 0.1962 | 10.0 | 150 | 0.4826 | 0.8686 | 0.8782 | 0.8635 | 0.8686 |
96
+
97
+
98
+ ### Framework versions
99
+
100
+ - Transformers 4.49.0
101
+ - Pytorch 2.6.0+cu124
102
+ - Datasets 3.3.2
103
+ - Tokenizers 0.21.0
config.json ADDED
@@ -0,0 +1,104 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_name_or_path": "dennisjooo/Birds-Classifier-EfficientNetB2",
3
+ "architectures": [
4
+ "EfficientNetForImageClassification"
5
+ ],
6
+ "batch_norm_eps": 0.001,
7
+ "batch_norm_momentum": 0.99,
8
+ "depth_coefficient": 1.2,
9
+ "depth_divisor": 8,
10
+ "depthwise_padding": [
11
+ 5,
12
+ 8,
13
+ 16
14
+ ],
15
+ "drop_connect_rate": 0.2,
16
+ "dropout_rate": 0.3,
17
+ "expand_ratios": [
18
+ 1,
19
+ 6,
20
+ 6,
21
+ 6,
22
+ 6,
23
+ 6,
24
+ 6
25
+ ],
26
+ "hidden_act": "swish",
27
+ "hidden_dim": 1408,
28
+ "id2label": {
29
+ "24": "American_Wigeon",
30
+ "114": "Bufflehead",
31
+ "126": "Canvasback",
32
+ "286": "Hooded_Merganser",
33
+ "339": "Mallard",
34
+ "364": "Northern_Shoveler",
35
+ "409": "Redhead",
36
+ "516": "Wood_Duck"
37
+ },
38
+ "image_size": 260,
39
+ "in_channels": [
40
+ 32,
41
+ 16,
42
+ 24,
43
+ 40,
44
+ 80,
45
+ 112,
46
+ 192
47
+ ],
48
+ "initializer_range": 0.02,
49
+ "kernel_sizes": [
50
+ 3,
51
+ 3,
52
+ 5,
53
+ 3,
54
+ 5,
55
+ 5,
56
+ 3
57
+ ],
58
+ "label2id": {
59
+ "American_Wigeon": 24,
60
+ "Bufflehead": 114,
61
+ "Canvasback": 126,
62
+ "Hooded_Merganser": 286,
63
+ "Mallard": 339,
64
+ "Northern_Shoveler": 364,
65
+ "Redhead": 409,
66
+ "Wood_Duck": 516
67
+ },
68
+ "model_type": "efficientnet",
69
+ "num_block_repeats": [
70
+ 1,
71
+ 2,
72
+ 2,
73
+ 3,
74
+ 3,
75
+ 4,
76
+ 1
77
+ ],
78
+ "num_channels": 3,
79
+ "num_hidden_layers": 64,
80
+ "out_channels": [
81
+ 16,
82
+ 24,
83
+ 40,
84
+ 80,
85
+ 112,
86
+ 192,
87
+ 320
88
+ ],
89
+ "pooling_type": "mean",
90
+ "problem_type": "single_label_classification",
91
+ "squeeze_expansion_ratio": 0.25,
92
+ "strides": [
93
+ 1,
94
+ 2,
95
+ 2,
96
+ 2,
97
+ 1,
98
+ 2,
99
+ 1
100
+ ],
101
+ "torch_dtype": "float32",
102
+ "transformers_version": "4.49.0",
103
+ "width_coefficient": 1.1
104
+ }
model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5464ed9fe8671b3336d1e45c33523f53d79e3544f55ed1fd50a094872baf6cc8
3
+ size 31185464
preprocessor_config.json ADDED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "crop_size": {
3
+ "height": 289,
4
+ "width": 289
5
+ },
6
+ "do_center_crop": false,
7
+ "do_normalize": true,
8
+ "do_rescale": true,
9
+ "do_resize": true,
10
+ "image_mean": [
11
+ 0.485,
12
+ 0.456,
13
+ 0.406
14
+ ],
15
+ "image_processor_type": "EfficientNetImageProcessor",
16
+ "image_std": [
17
+ 0.47853944,
18
+ 0.4732864,
19
+ 0.47434163
20
+ ],
21
+ "include_top": true,
22
+ "resample": 0,
23
+ "rescale_factor": 0.00392156862745098,
24
+ "rescale_offset": false,
25
+ "size": {
26
+ "height": 260,
27
+ "width": 260
28
+ }
29
+ }
runs/Apr10_19-28-53_max-desktop/events.out.tfevents.1744327733.max-desktop.68090.13 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a2b7c8761348f713a48ff0cb42c32c502ad48fa64de6701ef8b8a82388be9f4c
3
+ size 13074
runs/Apr10_19-46-48_max-desktop/events.out.tfevents.1744328808.max-desktop.68090.14 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3895f91cf663315531f33452b9698b8e1506bbb458a6ffdc416dc6248b671add
3
+ size 5994
runs/Apr10_19-47-17_max-desktop/events.out.tfevents.1744328837.max-desktop.68090.15 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a37e256246f69f30d029a2bc47383bad28b27980b1360490ebba2cc93f298930
3
+ size 13076
training_args.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d772dae5aad1c84a42616151e1086497ef9a60754f6f9ddc7f9e82fc5fa997c8
3
+ size 5304