whyoke committed
Commit bb16e33 · verified · 1 Parent(s): dae627c

Training in progress, step 20

Files changed (3)
  1. config.json +77 -0
  2. model.safetensors +3 -0
  3. training_args.bin +3 -0
config.json ADDED
@@ -0,0 +1,77 @@
+ {
+   "architectures": [
+     "SegformerForSemanticSegmentation"
+   ],
+   "attention_probs_dropout_prob": 0.0,
+   "classifier_dropout_prob": 0.1,
+   "decoder_hidden_size": 256,
+   "depths": [
+     2,
+     2,
+     2,
+     2
+   ],
+   "downsampling_rates": [
+     1,
+     4,
+     8,
+     16
+   ],
+   "drop_path_rate": 0.1,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.0,
+   "hidden_sizes": [
+     32,
+     64,
+     160,
+     256
+   ],
+   "id2label": {
+     "0": "background",
+     "1": "capillary"
+   },
+   "image_size": 224,
+   "initializer_range": 0.02,
+   "label2id": {
+     "background": 0,
+     "capillary": 1
+   },
+   "layer_norm_eps": 1e-06,
+   "mlp_ratios": [
+     4,
+     4,
+     4,
+     4
+   ],
+   "model_type": "segformer",
+   "num_attention_heads": [
+     1,
+     2,
+     5,
+     8
+   ],
+   "num_channels": 3,
+   "num_encoder_blocks": 4,
+   "patch_sizes": [
+     7,
+     3,
+     3,
+     3
+   ],
+   "reshape_last_stage": true,
+   "semantic_loss_ignore_index": 255,
+   "sr_ratios": [
+     8,
+     4,
+     2,
+     1
+   ],
+   "strides": [
+     4,
+     2,
+     2,
+     2
+   ],
+   "torch_dtype": "float32",
+   "transformers_version": "4.51.3"
+ }
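
The committed config describes a MiT-b0-sized SegFormer with two segmentation classes (background vs. capillary). A minimal sketch, assuming only the transformers library, of how the same configuration can be rebuilt in code; values not passed here fall back to library defaults, which is an assumption rather than something this commit states:

```python
from transformers import SegformerConfig, SegformerForSemanticSegmentation

# Rebuild the committed configuration: MiT-b0-sized encoder, two classes.
config = SegformerConfig(
    depths=[2, 2, 2, 2],
    hidden_sizes=[32, 64, 160, 256],
    num_attention_heads=[1, 2, 5, 8],
    decoder_hidden_size=256,
    classifier_dropout_prob=0.1,
    drop_path_rate=0.1,
    semantic_loss_ignore_index=255,
    id2label={0: "background", 1: "capillary"},
    label2id={"background": 0, "capillary": 1},
)
model = SegformerForSemanticSegmentation(config)
print(model.config.num_labels)  # 2, derived from id2label
```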
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d1afe3f25144d4d7b881f59909aca23137736a966264414bab329bc7bfae855d
+ size 14884776
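
The safetensors file is tracked with Git LFS, so the diff only shows the pointer (spec version, SHA-256 object id, and size in bytes, roughly 14.9 MB). A hedged sketch of fetching and inspecting the actual weights with huggingface_hub and safetensors; the repo_id below is a placeholder, since the repository name is not part of this commit view:

```python
from huggingface_hub import hf_hub_download
from safetensors.torch import load_file

# "user/repo" is a placeholder repository id, not taken from this commit.
path = hf_hub_download(
    repo_id="user/repo",
    filename="model.safetensors",
    revision="bb16e33",  # the commit shown above
)
state_dict = load_file(path)
print(f"{sum(t.numel() for t in state_dict.values()):,} parameters")
```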
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b17e0f64e344313d65785c8b58bb1b9efc2f1c81281d1cb3898f73d6bb85e2af
+ size 5304
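
training_args.bin is the pickled TrainingArguments object written by the Trainer alongside the checkpoint. A small sketch, assuming a locally downloaded copy of the file, of how it can be inspected:

```python
import torch

# training_args.bin is a pickled transformers.TrainingArguments object,
# so weights_only must be disabled to unpickle it (only do this for trusted files).
args = torch.load("training_args.bin", weights_only=False)
print(args.output_dir, args.per_device_train_batch_size, args.num_train_epochs)
```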